Diffstat (limited to 'llvm/test/CodeGen')
266 files changed, 45109 insertions, 11558 deletions
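Most of the churn in the hunks below is regenerated FileCheck output. The GlobalISel IRTranslator checks now expect getelementptr no-wrap information on the translated instructions (nsw on the scaled-index G_MUL, and nuw/nusw/inbounds on the resulting G_PTR_ADD); the arm64-pcsections.ll atomics checks drop "renamable" from implicit-def operands; the AArch64 abds/abdu tests fold a sub+cmp pair into a single subs; the scalable-vector alloca tests pick up shorter .cfi_escape encodings; and a few tests gain new or autogenerated check lines. As a minimal sketch of the GEP-flag change, drawn from the updated checks in arm64-irtranslator-gep.ll and irtranslator-gep-flags.ll below (the function name and virtual-register names here are illustrative, not taken from the tests):

define i32 @gep_inbounds_example(ptr %ptr, i64 %idx) {
  %gep = getelementptr inbounds [4 x i32], ptr %ptr, i64 %idx, i64 0
  %v = load i32, ptr %gep
  ret i32 %v
}

; Expected address computation for %gep after this change
; (previously both instructions carried no flags):
;   %c16:_(s64)  = G_CONSTANT i64 16              ; sizeof([4 x i32])
;   %mul:_(s64)  = nsw G_MUL %idx, %c16
;   %addr:_(p0)  = nusw inbounds G_PTR_ADD %ptr, %mul(s64)

Running such a function through something like llc -mtriple=aarch64 -global-isel -stop-after=irtranslator should show the flagged G_MUL and G_PTR_ADD, matching the regenerated check lines in the diff that follows.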
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll index 639b6fd..da171ed 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll @@ -13,12 +13,12 @@ define i32 @cse_gep(ptr %ptr, i32 %idx) { ; O0-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 ; O0-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) ; O0-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; O0-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; O0-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64) + ; O0-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; O0-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[COPY]], [[MUL]](s64) ; O0-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; O0-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1) - ; O0-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; O0-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64) + ; O0-NEXT: [[MUL1:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; O0-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[COPY]], [[MUL1]](s64) ; O0-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; O0-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; O0-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.gep2) @@ -34,8 +34,8 @@ define i32 @cse_gep(ptr %ptr, i32 %idx) { ; O3-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 ; O3-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) ; O3-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; O3-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; O3-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64) + ; O3-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; O3-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[COPY]], [[MUL]](s64) ; O3-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; O3-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1) ; O3-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll index 79b2e2e..02a8a4f 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll @@ -792,8 +792,8 @@ define void @jt_multiple_jump_tables(ptr %arg, i32 %arg1, ptr %arg2) { ; CHECK-NEXT: bb.56.bb57: ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[C56]](s64), %bb.1, [[C57]](s64), %bb.2, [[C58]](s64), %bb.3, [[C59]](s64), %bb.4, [[C60]](s64), %bb.5, [[C61]](s64), %bb.6, [[C62]](s64), %bb.7, [[C63]](s64), %bb.8, [[C64]](s64), %bb.9, [[C65]](s64), %bb.10, [[C66]](s64), %bb.11, [[C67]](s64), %bb.12, [[C68]](s64), %bb.13, [[C69]](s64), %bb.14, [[C70]](s64), %bb.15, [[C71]](s64), %bb.16, [[C72]](s64), %bb.17, [[C73]](s64), %bb.18, [[C74]](s64), %bb.19, [[C75]](s64), %bb.20, [[C76]](s64), %bb.21, [[C77]](s64), %bb.22, [[C78]](s64), %bb.23, [[C79]](s64), %bb.24, [[C80]](s64), %bb.25, [[C81]](s64), %bb.26, [[C82]](s64), %bb.27, [[C83]](s64), %bb.28, [[C84]](s64), %bb.29, [[C85]](s64), %bb.30, [[C86]](s64), %bb.31, [[C87]](s64), %bb.32, [[C88]](s64), %bb.33, [[C89]](s64), %bb.34, [[C90]](s64), %bb.35, [[C91]](s64), %bb.36, [[C92]](s64), %bb.37, [[C93]](s64), %bb.38, [[C94]](s64), %bb.39, [[C95]](s64), %bb.40, 
[[C96]](s64), %bb.41, [[C97]](s64), %bb.42, [[C98]](s64), %bb.43, [[C99]](s64), %bb.44, [[C100]](s64), %bb.45, [[C101]](s64), %bb.46, [[C102]](s64), %bb.47, [[C103]](s64), %bb.48, [[C104]](s64), %bb.49, [[C105]](s64), %bb.50, [[C106]](s64), %bb.51, [[C107]](s64), %bb.52, [[C108]](s64), %bb.53, [[C109]](s64), %bb.54, [[C110]](s64), %bb.55 ; CHECK-NEXT: [[C111:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[PHI]], [[C111]] - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[MUL]](s64) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[PHI]], [[C111]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[GV]], [[MUL]](s64) ; CHECK-NEXT: [[C112:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD]], [[C112]](s64) ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD1]](p0) :: (load (p0) from %ir.tmp59) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll index 4a85d84..2779e89 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll @@ -12,7 +12,7 @@ define i32 @val_compare_and_swap(ptr %p, i32 %cmp, i32 %new) { ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK-NEXT: liveins: $w1, $w2, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -46,13 +46,13 @@ define i32 @val_compare_and_swap_from_load(ptr %p, i32 %cmp, ptr %pnew) { ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $w1, $x0, $x2 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def renamable $x9, pcsections !0 :: (load (s32) from %ir.pnew) + ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def $x9, pcsections !0 :: (load (s32) from %ir.pnew) ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1.cmpxchg.start: ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0, $x9 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -91,7 +91,7 @@ define i32 @val_compare_and_swap_rel(ptr %p, i32 %cmp, i32 %new) { ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK-NEXT: liveins: $w1, $w2, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections 
!0 ; CHECK-NEXT: {{ $}} @@ -243,7 +243,7 @@ define i32 @fetch_and_nand(ptr %p) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w8, 2, pcsections !0 ; CHECK-NEXT: $w9 = ORNWrs $wzr, killed renamable $w9, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRW killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) @@ -295,7 +295,7 @@ define i32 @fetch_and_or(ptr %p) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: $w10 = ORRWrs renamable $w8, renamable $w9, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STLXRW killed renamable $w10, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 @@ -726,7 +726,7 @@ define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -750,7 +750,7 @@ define i8 @atomicrmw_xchg_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: early-clobber renamable $w9 = STXRB renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -773,7 +773,7 @@ define i8 @atomicrmw_sub_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -797,7 +797,7 @@ define i8 
@atomicrmw_and_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -821,7 +821,7 @@ define i8 @atomicrmw_or_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -845,7 +845,7 @@ define i8 @atomicrmw_xor_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -869,7 +869,7 @@ define i8 @atomicrmw_min_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 11, implicit killed $nzcv, pcsections !0 @@ -895,7 +895,7 @@ define i8 @atomicrmw_max_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, 
implicit killed $nzcv, pcsections !0 @@ -923,10 +923,10 @@ define i8 @atomicrmw_umin_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 7, implicit killed $x8 ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0 - ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def $x10, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STLXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -951,10 +951,10 @@ define i8 @atomicrmw_umax_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 7, implicit killed $x8 ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0 - ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -977,7 +977,7 @@ define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1001,7 +1001,7 @@ define i16 @atomicrmw_xchg_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: early-clobber renamable $w9 = STXRH renamable $w1, renamable $x0, pcsections !0 :: 
(volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -1024,7 +1024,7 @@ define i16 @atomicrmw_sub_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1048,7 +1048,7 @@ define i16 @atomicrmw_and_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1072,7 +1072,7 @@ define i16 @atomicrmw_or_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1096,7 +1096,7 @@ define i16 @atomicrmw_xor_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1120,7 +1120,7 @@ define i16 @atomicrmw_min_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0 ; 
CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 11, implicit killed $nzcv, pcsections !0 @@ -1146,7 +1146,7 @@ define i16 @atomicrmw_max_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, pcsections !0 @@ -1174,10 +1174,10 @@ define i16 @atomicrmw_umin_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 15, implicit killed $x8 ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0 - ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def $x10, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STLXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -1202,10 +1202,10 @@ define i16 @atomicrmw_umax_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 15, implicit killed $x8 ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0 - ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -1230,7 +1230,7 @@ define { i8, i1 } @cmpxchg_i8(ptr %ptr, i8 %desired, i8 %new) { ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000) ; CHECK-NEXT: liveins: $w1, $w2, $x8 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def renamable $x0, pcsections !0 :: (volatile load (s8) from 
%ir.ptr) + ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 7, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 0, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0 @@ -1272,7 +1272,7 @@ define { i16, i1 } @cmpxchg_i16(ptr %ptr, i16 %desired, i16 %new) { ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000) ; CHECK-NEXT: liveins: $w1, $w2, $x8 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def renamable $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 15, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 8, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-gep-flags.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-gep-flags.ll index 8a6f266..b7cf9b3 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-gep-flags.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-gep-flags.ll @@ -10,12 +10,12 @@ define i32 @gep_nusw_nuw(ptr %ptr, i32 %idx) { ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[COPY]], [[MUL]](s64) ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1) - ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64) + ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = nuw nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[MUL1]](s64) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.gep2) @@ -40,12 +40,12 @@ define i32 @gep_nuw(ptr %ptr, i32 %idx) { ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[COPY]], [[MUL]](s64) ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1) - ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64) + ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = nuw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = 
nuw G_PTR_ADD [[COPY]], [[MUL1]](s64) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.gep2) @@ -70,14 +70,14 @@ define i32 @gep_nusw(ptr %ptr, i32 %idx) { ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[COPY]], [[MUL]](s64) ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1) - ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64) + ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nusw G_PTR_ADD [[COPY]], [[MUL1]](s64) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nusw G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.gep2) ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]] ; CHECK-NEXT: $w0 = COPY [[ADD]](s32) @@ -100,8 +100,8 @@ define i32 @gep_none(ptr %ptr, i32 %idx) { ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[COPY]], [[MUL]](s64) ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1) ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] @@ -120,3 +120,166 @@ define i32 @gep_none(ptr %ptr, i32 %idx) { %res = add i32 %v1, %v2 ret i32 %res } + +define i32 @gep_nusw_negative(ptr %ptr, i32 %idx) { + ; CHECK-LABEL: name: gep_nusw_negative + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[COPY]], [[MUL]](s64) + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1) + ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nusw G_PTR_ADD [[COPY]], [[MUL1]](s64) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4 + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nusw G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) + ; CHECK-NEXT: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.gep2) + ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]] + ; CHECK-NEXT: $w0 = COPY [[ADD]](s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 + %sidx = sext i32 %idx to i64 + %gep1 = getelementptr inbounds [4 x i32], ptr %ptr, i64 %sidx, i64 0 + %v1 = load i32, ptr %gep1 + %gep2 = getelementptr nusw [4 x i32], ptr %ptr, i64 %sidx, i64 -1 + %v2 = load i32, ptr %gep2 + %res = add i32 %v1, %v2 + ret i32 %res + } + +define ptr @gep_many_indices(ptr %ptr, i32 %idx) { + ; CHECK-LABEL: name: gep_many_indices + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 108 + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C1]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[MUL]](s64) + ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4 + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C2]](s64) + ; CHECK-NEXT: $x0 = COPY [[PTR_ADD2]](p0) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %sidx = sext i32 %idx to i64 + %gep = getelementptr {i32, [4 x [3 x i32]]}, ptr %ptr, i64 2, i32 1, i64 %sidx, i64 -1 + ret ptr %gep + } + +define ptr @gep_nuw_many_indices(ptr %ptr, i32 %idx) { + ; CHECK-LABEL: name: gep_nuw_many_indices + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 108 + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nuw G_MUL [[SEXT]], [[C1]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[PTR_ADD]], [[MUL]](s64) + ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4 + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[PTR_ADD1]], [[C2]](s64) + ; CHECK-NEXT: $x0 = COPY [[PTR_ADD2]](p0) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %sidx = sext i32 %idx to i64 + %gep = getelementptr nuw {i32, [4 x [3 x i32]]}, ptr %ptr, i64 2, i32 1, i64 %sidx, i64 -1 + ret ptr %gep + } + +define ptr @gep_nusw_many_indices(ptr %ptr, i32 %idx) { + ; CHECK-LABEL: name: gep_nusw_many_indices + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 108 + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C1]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nusw G_PTR_ADD [[PTR_ADD]], [[MUL]](s64) + ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4 + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nusw G_PTR_ADD [[PTR_ADD1]], [[C2]](s64) + ; CHECK-NEXT: $x0 = COPY [[PTR_ADD2]](p0) + ; CHECK-NEXT: RET_ReallyLR 
implicit $x0 + %sidx = sext i32 %idx to i64 + %gep = getelementptr nusw {i32, [4 x [3 x i32]]}, ptr %ptr, i64 2, i32 1, i64 %sidx, i64 -1 + ret ptr %gep + } + +define ptr @gep_inbounds_many_indices(ptr %ptr, i32 %idx) { + ; CHECK-LABEL: name: gep_inbounds_many_indices + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 108 + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C1]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[PTR_ADD]], [[MUL]](s64) + ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4 + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nusw inbounds G_PTR_ADD [[PTR_ADD1]], [[C2]](s64) + ; CHECK-NEXT: $x0 = COPY [[PTR_ADD2]](p0) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %sidx = sext i32 %idx to i64 + %gep = getelementptr inbounds {i32, [4 x [3 x i32]]}, ptr %ptr, i64 2, i32 1, i64 %sidx, i64 -1 + ret ptr %gep + } + +define ptr @gep_nuw_nusw_many_indices(ptr %ptr, i32 %idx) { + ; CHECK-LABEL: name: gep_nuw_nusw_many_indices + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 108 + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nuw nsw G_MUL [[SEXT]], [[C1]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD]], [[MUL]](s64) + ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4 + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD1]], [[C2]](s64) + ; CHECK-NEXT: $x0 = COPY [[PTR_ADD2]](p0) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %sidx = sext i32 %idx to i64 + %gep = getelementptr nuw nusw {i32, [4 x [3 x i32]]}, ptr %ptr, i64 2, i32 1, i64 %sidx, i64 -1 + ret ptr %gep + } + +define ptr @gep_nuw_inbounds_many_indices(ptr %ptr, i32 %idx) { + ; CHECK-LABEL: name: gep_nuw_inbounds_many_indices + ; CHECK: bb.1 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32) + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 108 + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nuw nsw G_MUL [[SEXT]], [[C1]] + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD]], [[MUL]](s64) + ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4 + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD1]], [[C2]](s64) + ; CHECK-NEXT: $x0 = COPY [[PTR_ADD2]](p0) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %sidx = sext i32 %idx to i64 + %gep = getelementptr nuw inbounds {i32, [4 x [3 x i32]]}, ptr %ptr, i64 2, i32 1, i64 %sidx, i64 -1 + ret ptr %gep + } + diff --git 
a/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll index 29763f2..5b2d660 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/translate-gep.ll @@ -136,8 +136,8 @@ define <2 x ptr> @vec_gep_scalar_base(<2 x i64> %offs) { ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[GV]](p0), [[GV]](p0) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) - ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[COPY]], [[BUILD_VECTOR1]] - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(<2 x p0>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<2 x s64>) = nsw G_MUL [[COPY]], [[BUILD_VECTOR1]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(<2 x p0>) = nusw inbounds G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>) ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x p0>) = COPY [[PTR_ADD]](<2 x p0>) ; CHECK-NEXT: $q0 = COPY [[COPY1]](<2 x p0>) ; CHECK-NEXT: RET_ReallyLR implicit $q0 diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll index 7524782..02c76ba 100644 --- a/llvm/test/CodeGen/AArch64/abds-neg.ll +++ b/llvm/test/CodeGen/AArch64/abds-neg.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -26,8 +25,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -43,8 +41,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -60,8 +57,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -93,8 +89,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i16 %a to i64 diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll index bbdb116..bf52e71 100644 --- a/llvm/test/CodeGen/AArch64/abds.ll +++ b/llvm/test/CodeGen/AArch64/abds.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -25,8 +24,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, 
w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -41,8 +39,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -57,8 +54,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -88,8 +84,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -215,8 +210,7 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_minmax_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i8 @llvm.smin.i8(i8 %a, i8 %b) @@ -229,8 +223,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_minmax_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i16 @llvm.smin.i16(i16 %a, i16 %b) @@ -287,8 +280,7 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_cmp_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sgt i8 %a, %b @@ -302,8 +294,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_cmp_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sge i16 %a, %b @@ -508,9 +499,8 @@ define i64 @vector_legalized(i16 %a, i16 %b) { ; CHECK: // %bb.0: ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: addp d0, v0.2d -; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: cneg w8, w8, mi ; CHECK-NEXT: fmov x9, d0 ; CHECK-NEXT: add x0, x9, x8 @@ -533,8 +523,7 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_select_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp slt i8 %a, %b @@ -548,8 +537,7 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_select_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sle i16 %a, %b diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll 
index d07f099a..400031b 100644 --- a/llvm/test/CodeGen/AArch64/abdu-neg.ll +++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -26,8 +25,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -43,8 +41,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -60,8 +57,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i16 %a to i64 @@ -93,8 +89,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i16 %a to i64 diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll index 1045ee2..8d2b0b0 100644 --- a/llvm/test/CodeGen/AArch64/abdu.ll +++ b/llvm/test/CodeGen/AArch64/abdu.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -25,8 +24,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -41,8 +39,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -57,8 +54,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i16 %a to i64 @@ -88,8 +84,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext 
i16 %a to i64 @@ -219,8 +214,7 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_minmax_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i8 @llvm.umin.i8(i8 %a, i8 %b) @@ -233,8 +227,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_minmax_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i16 @llvm.umin.i16(i16 %a, i16 %b) @@ -293,8 +286,7 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_cmp_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ugt i8 %a, %b @@ -308,8 +300,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_cmp_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp uge i16 %a, %b @@ -373,10 +364,9 @@ define i64 @vector_legalized(i16 %a, i16 %b) { ; CHECK: // %bb.0: ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: addp d0, v0.2d +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w8, w8, mi +; CHECK-NEXT: addp d0, v0.2d ; CHECK-NEXT: fmov x9, d0 ; CHECK-NEXT: add x0, x9, x8 ; CHECK-NEXT: ret @@ -398,8 +388,7 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_select_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ult i8 %a, %b @@ -413,8 +402,7 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_select_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ule i16 %a, %b diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll index 3a808f5..dd018a6 100644 --- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll +++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll @@ -11,7 +11,7 @@ define void @array_1D(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] @@ -34,7 +34,7 @@ define %my_subtype @array_1D_extract(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0, #1, mul vl] ; CHECK-NEXT: addvl sp, sp, #3 @@ -52,7 +52,7 @@ define void @array_1D_insert(ptr %addr, %my_subtype %elt) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] ; CHECK-NEXT: ldr z2, [x0] @@ -75,7 +75,7 @@ define void @array_2D(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-6 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x30, 0x1e, 0x22 // sp + 16 + 48 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #5, mul vl] diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll index e7d8f4f..be73dc9 100644 --- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll +++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll @@ -10,7 +10,7 @@ define void @test(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/arm64-ext.ll b/llvm/test/CodeGen/AArch64/arm64-ext.ll index 50df6a0..8bf2b82 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ext.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ext.ll @@ -135,3 +135,68 @@ define <2 x ptr> @test_v2p0(<2 x ptr> %a, <2 x ptr> %b) { %s = shufflevector <2 x ptr> %a, <2 x ptr> %b, <2 x i32> <i32 3, i32 0> ret <2 x ptr> %s } + +define <16 x i8> @reverse_vector_s8x16b(<16 x i8> noundef %x) { +; CHECK-SD-LABEL: reverse_vector_s8x16b: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: rev64 v1.16b, v0.16b +; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: mov v0.d[1], v1.d[0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: reverse_vector_s8x16b: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: rev64 v1.16b, v0.16b +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: ret +entry: + %shuffle.i = shufflevector <16 x i8> %x, <16 x i8> poison, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8> + %shuffle.i6 = shufflevector <16 x i8> %shuffle.i, <16 x i8> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i7 = shufflevector <16 x i8> %shuffle.i, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %shuffle.i5 = shufflevector <8 x i8> %shuffle.i6, <8 x i8> %shuffle.i7, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + ret <16 x i8> %shuffle.i5 +} + +define <8 x i16> @reverse_vector_s16x8b(<8 x i16> noundef %x) { +; CHECK-SD-LABEL: reverse_vector_s16x8b: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: rev64 v1.8h, v0.8h +; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: mov v0.d[1], v1.d[0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: reverse_vector_s16x8b: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: rev64 v1.8h, v0.8h +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %x, <8 x i16> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> + %shuffle.i6 = shufflevector <8 x i16> %shuffle.i, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i7 = shufflevector <8 x i16> %shuffle.i, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %shuffle.i5 = shufflevector <4 x i16> %shuffle.i6, <4 x i16> %shuffle.i7, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + ret <8 x i16> %shuffle.i5 +} + +define <4 x i32> @reverse_vector_s32x4b(<4 x i32> noundef %x) { +; CHECK-SD-LABEL: reverse_vector_s32x4b: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: rev64 v0.4s, v0.4s +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: reverse_vector_s32x4b: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: rev64 v1.4s, v0.4s +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: mov v0.d[1], v1.d[0] +; CHECK-GI-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x 
i32> <i32 1, i32 0, i32 3, i32 2> + %shuffle.i6 = shufflevector <4 x i32> %shuffle.i, <4 x i32> poison, <2 x i32> <i32 2, i32 3> + %shuffle.i7 = shufflevector <4 x i32> %shuffle.i, <4 x i32> poison, <2 x i32> <i32 0, i32 1> + %shuffle.i5 = shufflevector <2 x i32> %shuffle.i6, <2 x i32> %shuffle.i7, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x i32> %shuffle.i5 +} diff --git a/llvm/test/CodeGen/AArch64/arm64-vext.ll b/llvm/test/CodeGen/AArch64/arm64-vext.ll index a56bd6b..e522c05 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vext.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vext.ll @@ -1,8 +1,16 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define void @test_vext_s8() nounwind ssp { - ; CHECK-LABEL: test_vext_s8: - ; CHECK: {{ext.8.*#1}} +; CHECK-LABEL: test_vext_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: ext.8b v1, v0, v0, #1 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xS8x8 = alloca <8 x i8>, align 8 %__a = alloca <8 x i8>, align 8 %__b = alloca <8 x i8>, align 8 @@ -18,8 +26,15 @@ define void @test_vext_s8() nounwind ssp { } define void @test_vext_u8() nounwind ssp { - ; CHECK-LABEL: test_vext_u8: - ; CHECK: {{ext.8.*#2}} +; CHECK-LABEL: test_vext_u8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: ext.8b v1, v0, v0, #2 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xU8x8 = alloca <8 x i8>, align 8 %__a = alloca <8 x i8>, align 8 %__b = alloca <8 x i8>, align 8 @@ -35,8 +50,15 @@ define void @test_vext_u8() nounwind ssp { } define void @test_vext_p8() nounwind ssp { - ; CHECK-LABEL: test_vext_p8: - ; CHECK: {{ext.8.*#3}} +; CHECK-LABEL: test_vext_p8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: ext.8b v1, v0, v0, #3 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xP8x8 = alloca <8 x i8>, align 8 %__a = alloca <8 x i8>, align 8 %__b = alloca <8 x i8>, align 8 @@ -52,8 +74,15 @@ define void @test_vext_p8() nounwind ssp { } define void @test_vext_s16() nounwind ssp { - ; CHECK-LABEL: test_vext_s16: - ; CHECK: {{ext.8.*#2}} +; CHECK-LABEL: test_vext_s16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: ext.8b v1, v0, v0, #2 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xS16x4 = alloca <4 x i16>, align 8 %__a = alloca <4 x i16>, align 8 %__b = alloca <4 x i16>, align 8 @@ -73,8 +102,15 @@ define void @test_vext_s16() nounwind ssp { } define void @test_vext_u16() nounwind ssp { - ; CHECK-LABEL: test_vext_u16: - ; CHECK: {{ext.8.*#4}} +; CHECK-LABEL: test_vext_u16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: ext.8b v1, v0, v0, #4 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xU16x4 = alloca <4 x i16>, align 8 %__a = alloca <4 x i16>, align 8 %__b = alloca <4 x i16>, align 8 @@ -94,8 +130,15 @@ define void @test_vext_u16() nounwind ssp { } define void @test_vext_p16() nounwind ssp { - ; CHECK-LABEL: test_vext_p16: - ; CHECK: 
{{ext.8.*#6}} +; CHECK-LABEL: test_vext_p16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: ext.8b v1, v0, v0, #6 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xP16x4 = alloca <4 x i16>, align 8 %__a = alloca <4 x i16>, align 8 %__b = alloca <4 x i16>, align 8 @@ -115,8 +158,15 @@ define void @test_vext_p16() nounwind ssp { } define void @test_vext_s32() nounwind ssp { - ; CHECK-LABEL: test_vext_s32: - ; CHECK: {{rev64.2s.*}} +; CHECK-LABEL: test_vext_s32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: rev64.2s v1, v0 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xS32x2 = alloca <2 x i32>, align 8 %__a = alloca <2 x i32>, align 8 %__b = alloca <2 x i32>, align 8 @@ -136,8 +186,15 @@ define void @test_vext_s32() nounwind ssp { } define void @test_vext_u32() nounwind ssp { - ; CHECK-LABEL: test_vext_u32: - ; CHECK: {{rev64.2s.*}} +; CHECK-LABEL: test_vext_u32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: rev64.2s v1, v0 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xU32x2 = alloca <2 x i32>, align 8 %__a = alloca <2 x i32>, align 8 %__b = alloca <2 x i32>, align 8 @@ -157,8 +214,15 @@ define void @test_vext_u32() nounwind ssp { } define void @test_vext_f32() nounwind ssp { - ; CHECK-LABEL: test_vext_f32: - ; CHECK: {{rev64.2s.*}} +; CHECK-LABEL: test_vext_f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: rev64.2s v1, v0 +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: str d1, [sp, #24] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret %xF32x2 = alloca <2 x float>, align 8 %__a = alloca <2 x float>, align 8 %__b = alloca <2 x float>, align 8 @@ -178,7 +242,13 @@ define void @test_vext_f32() nounwind ssp { } define void @test_vext_s64() nounwind ssp { - ; CHECK-LABEL: test_vext_s64: +; CHECK-LABEL: test_vext_s64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret ; CHECK_FIXME: {{rev64.2s.*}} ; this just turns into a load of the second element %xS64x1 = alloca <1 x i64>, align 8 @@ -200,7 +270,13 @@ define void @test_vext_s64() nounwind ssp { } define void @test_vext_u64() nounwind ssp { - ; CHECK-LABEL: test_vext_u64: +; CHECK-LABEL: test_vext_u64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: ldr d0, [sp, #24] +; CHECK-NEXT: stp d0, d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret ; CHECK_FIXME: {{ext.8.*#1}} ; this is turned into a simple load of the 2nd element %xU64x1 = alloca <1 x i64>, align 8 @@ -222,8 +298,15 @@ define void @test_vext_u64() nounwind ssp { } define void @test_vextq_s8() nounwind ssp { - ; CHECK-LABEL: test_vextq_s8: - ; CHECK: {{ext.16.*#4}} +; CHECK-LABEL: test_vextq_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #4 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xS8x16 = alloca <16 x i8>, align 16 %__a = alloca <16 x i8>, align 16 %__b = alloca <16 x i8>, align 16 @@ -239,8 +322,15 @@ define void @test_vextq_s8() nounwind ssp { } define void 
@test_vextq_u8() nounwind ssp { - ; CHECK-LABEL: test_vextq_u8: - ; CHECK: {{ext.16.*#5}} +; CHECK-LABEL: test_vextq_u8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #5 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xU8x16 = alloca <16 x i8>, align 16 %__a = alloca <16 x i8>, align 16 %__b = alloca <16 x i8>, align 16 @@ -256,8 +346,15 @@ define void @test_vextq_u8() nounwind ssp { } define void @test_vextq_p8() nounwind ssp { - ; CHECK-LABEL: test_vextq_p8: - ; CHECK: {{ext.16.*#6}} +; CHECK-LABEL: test_vextq_p8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #6 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xP8x16 = alloca <16 x i8>, align 16 %__a = alloca <16 x i8>, align 16 %__b = alloca <16 x i8>, align 16 @@ -273,8 +370,15 @@ define void @test_vextq_p8() nounwind ssp { } define void @test_vextq_s16() nounwind ssp { - ; CHECK-LABEL: test_vextq_s16: - ; CHECK: {{ext.16.*#14}} +; CHECK-LABEL: test_vextq_s16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #14 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xS16x8 = alloca <8 x i16>, align 16 %__a = alloca <8 x i16>, align 16 %__b = alloca <8 x i16>, align 16 @@ -294,8 +398,15 @@ define void @test_vextq_s16() nounwind ssp { } define void @test_vextq_u16() nounwind ssp { - ; CHECK-LABEL: test_vextq_u16: - ; CHECK: {{ext.16.*#8}} +; CHECK-LABEL: test_vextq_u16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #8 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xU16x8 = alloca <8 x i16>, align 16 %__a = alloca <8 x i16>, align 16 %__b = alloca <8 x i16>, align 16 @@ -315,8 +426,15 @@ define void @test_vextq_u16() nounwind ssp { } define void @test_vextq_p16() nounwind ssp { - ; CHECK-LABEL: test_vextq_p16: - ; CHECK: {{ext.16.*#10}} +; CHECK-LABEL: test_vextq_p16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #10 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xP16x8 = alloca <8 x i16>, align 16 %__a = alloca <8 x i16>, align 16 %__b = alloca <8 x i16>, align 16 @@ -336,8 +454,15 @@ define void @test_vextq_p16() nounwind ssp { } define void @test_vextq_s32() nounwind ssp { - ; CHECK-LABEL: test_vextq_s32: - ; CHECK: {{ext.16.*#4}} +; CHECK-LABEL: test_vextq_s32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #4 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xS32x4 = alloca <4 x i32>, align 16 %__a = alloca <4 x i32>, align 16 %__b = alloca <4 x i32>, align 16 @@ -357,8 +482,15 @@ define void @test_vextq_s32() nounwind ssp { } define void @test_vextq_u32() nounwind ssp { - ; CHECK-LABEL: test_vextq_u32: - ; CHECK: {{ext.16.*#8}} +; CHECK-LABEL: test_vextq_u32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #8 +; CHECK-NEXT: stp q0, q0, [sp] +; 
CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xU32x4 = alloca <4 x i32>, align 16 %__a = alloca <4 x i32>, align 16 %__b = alloca <4 x i32>, align 16 @@ -378,8 +510,15 @@ define void @test_vextq_u32() nounwind ssp { } define void @test_vextq_f32() nounwind ssp { - ; CHECK-LABEL: test_vextq_f32: - ; CHECK: {{ext.16.*#12}} +; CHECK-LABEL: test_vextq_f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #12 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xF32x4 = alloca <4 x float>, align 16 %__a = alloca <4 x float>, align 16 %__b = alloca <4 x float>, align 16 @@ -399,8 +538,15 @@ define void @test_vextq_f32() nounwind ssp { } define void @test_vextq_s64() nounwind ssp { - ; CHECK-LABEL: test_vextq_s64: - ; CHECK: {{ext.16.*#8}} +; CHECK-LABEL: test_vextq_s64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #8 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xS64x2 = alloca <2 x i64>, align 16 %__a = alloca <2 x i64>, align 16 %__b = alloca <2 x i64>, align 16 @@ -420,8 +566,15 @@ define void @test_vextq_s64() nounwind ssp { } define void @test_vextq_u64() nounwind ssp { - ; CHECK-LABEL: test_vextq_u64: - ; CHECK: {{ext.16.*#8}} +; CHECK-LABEL: test_vextq_u64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: ldr q0, [sp, #32] +; CHECK-NEXT: ext.16b v1, v0, v0, #8 +; CHECK-NEXT: stp q0, q0, [sp] +; CHECK-NEXT: str q1, [sp, #32] +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: ret %xU64x2 = alloca <2 x i64>, align 16 %__a = alloca <2 x i64>, align 16 %__b = alloca <2 x i64>, align 16 @@ -445,18 +598,21 @@ define void @test_vextq_u64() nounwind ssp { ; rdar://12051674 define <16 x i8> @vext1(<16 x i8> %_a) nounwind { ; CHECK-LABEL: vext1: -; CHECK: ext.16b v0, v0, v0, #8 +; CHECK: // %bb.0: +; CHECK-NEXT: ext.16b v0, v0, v0, #8 +; CHECK-NEXT: ret %vext = shufflevector <16 x i8> %_a, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ret <16 x i8> %vext } ; <rdar://problem/12212062> define <2 x i64> @vext2(<2 x i64> %p0, <2 x i64> %p1) nounwind readnone ssp { -entry: ; CHECK-LABEL: vext2: -; CHECK: add.2d v0, v0, v1 -; CHECK-NEXT: ext.16b v0, v0, v0, #8 -; CHECK-NEXT: ret +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: add.2d v0, v0, v1 +; CHECK-NEXT: ext.16b v0, v0, v0, #8 +; CHECK-NEXT: ret +entry: %t0 = shufflevector <2 x i64> %p1, <2 x i64> undef, <2 x i32> <i32 1, i32 0> %t1 = shufflevector <2 x i64> %p0, <2 x i64> undef, <2 x i32> <i32 1, i32 0> %t2 = add <2 x i64> %t1, %t0 diff --git a/llvm/test/CodeGen/AArch64/arm64-vext_reverse.ll b/llvm/test/CodeGen/AArch64/arm64-vext_reverse.ll index c51ea17..9829ca3 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vext_reverse.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vext_reverse.ll @@ -1,172 +1,217 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=arm64-linux-gnuabi < %s | FileCheck %s -; The following tests is to check the correctness of reversing input operand +; The following tests is to check the correctness of reversing input operand ; of vext by enumerating all cases of using two undefs in shuffle masks. 
define <4 x i16> @vext_6701_0(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_6701_0: -; CHECK: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 7, i32 0, i32 1> ret <4 x i16> %x } define <4 x i16> @vext_6701_12(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_6701_12: -; CHECK: dup v0.2s, v0.s[0] +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-NEXT: dup v0.2s, v0.s[0] +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1> ret <4 x i16> %x } define <4 x i16> @vext_6701_13(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_6701_13: -; CHECK: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 7, i32 undef, i32 1> ret <4 x i16> %x } define <4 x i16> @vext_6701_14(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_6701_14: -; CHECK: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 7, i32 0, i32 undef> ret <4 x i16> %x } define <4 x i16> @vext_6701_23(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_6701_23: -; CHECK: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 undef, i32 undef, i32 1> ret <4 x i16> %x } define <4 x i16> @vext_6701_24(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_6701_24: -; CHECK: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #4 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 undef, i32 0, i32 undef> ret <4 x i16> %x } define <4 x i16> @vext_6701_34(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_6701_34: -; CHECK: dup v0.2s, v1.s[1] +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-NEXT: dup v0.2s, v1.s[1] +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 7, i32 undef, i32 undef> ret <4 x i16> %x } define <4 x i16> @vext_5670_0(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_5670_0: -; CHECK: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 6, i32 7, i32 0> ret <4 x i16> %x } define <4 x i16> @vext_5670_12(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_5670_12: -; CHECK: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 7, i32 0> ret <4 x i16> %x } define <4 x i16> @vext_5670_13(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_5670_13: -; CHECK: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 6, i32 undef, i32 
0> ret <4 x i16> %x } define <4 x i16> @vext_5670_14(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_5670_14: -; CHECK: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 6, i32 7, i32 undef> ret <4 x i16> %x } define <4 x i16> @vext_5670_23(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_5670_23: -; CHECK: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 undef, i32 undef, i32 0> ret <4 x i16> %x } define <4 x i16> @vext_5670_24(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_5670_24: -; CHECK: rev32 v0.4h, v1.4h +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: rev32 v0.4h, v1.4h +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 undef, i32 7, i32 undef> ret <4 x i16> %x } define <4 x i16> @vext_5670_34(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_5670_34: -; CHECK: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #2 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 6, i32 undef, i32 undef> ret <4 x i16> %x } define <4 x i16> @vext_7012_0(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_7012_0: -; CHECK: ext v0.8b, v1.8b, v0.8b, #6 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #6 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 0, i32 1, i32 2> ret <4 x i16> %x } define <4 x i16> @vext_7012_12(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_7012_12: -; CHECK: ext v0.8b, v0.8b, v0.8b, #6 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v0.8b, v0.8b, #6 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 1, i32 2> ret <4 x i16> %x } define <4 x i16> @vext_7012_13(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_7012_13: -; CHECK: rev32 v0.4h, v0.4h +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: rev32 v0.4h, v0.4h +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 0, i32 undef, i32 2> ret <4 x i16> %x } define <4 x i16> @vext_7012_14(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_7012_14: -; CHECK: ext v0.8b, v0.8b, v0.8b, #6 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v0.8b, v0.8b, #6 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef> ret <4 x i16> %x } define <4 x i16> @vext_7012_23(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_7012_23: -; CHECK: ext v0.8b, v1.8b, v0.8b, #6 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #6 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 undef, i32 undef, i32 2> ret <4 x i16> %x } define <4 x i16> @vext_7012_24(<4 x i16> %a1, <4 x i16> %a2) { -entry: ; CHECK-LABEL: vext_7012_24: -; CHECK: ext v0.8b, v1.8b, v0.8b, #6 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #6 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 undef, i32 1, i32 undef> ret <4 x i16> %x } define <4 x i16> @vext_7012_34(<4 x i16> %a1, <4 
x i16> %a2) { -entry: ; CHECK-LABEL: vext_7012_34: -; CHECK: ext v0.8b, v1.8b, v0.8b, #6 +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ext v0.8b, v1.8b, v0.8b, #6 +; CHECK-NEXT: ret +entry: %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 0, i32 undef, i32 undef> ret <4 x i16> %x } diff --git a/llvm/test/CodeGen/AArch64/arm64ec-empty-name.ll b/llvm/test/CodeGen/AArch64/arm64ec-empty-name.ll new file mode 100644 index 0000000..c7c9ee5 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/arm64ec-empty-name.ll @@ -0,0 +1,15 @@ +; RUN: llc -mtriple=arm64ec-pc-windows-msvc %s -o - | FileCheck %s + +; Regression test: Arm64EC needs to look at the first character of a function +; to decide if it will be mangled like a C or C++ function name, which caused +; it to crash for empty function names. +define void @""() { + ret void +} + +define void @""() { + ret void +} + +; CHECK: "#__unnamed": +; CHECK: "#__unnamed.1": diff --git a/llvm/test/CodeGen/AArch64/combine-storetomstore.ll b/llvm/test/CodeGen/AArch64/combine-storetomstore.ll new file mode 100644 index 0000000..c2e54d3 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/combine-storetomstore.ll @@ -0,0 +1,1193 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=aarch64-- -mattr=+sve | FileCheck %s -check-prefix=SVE + +define void @test_masked_store_success_v4i8(<4 x i8> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v4i8: +; SVE: // %bb.0: +; SVE-NEXT: shl v1.4h, v1.4h, #15 +; SVE-NEXT: ldr s2, [x0] +; SVE-NEXT: zip1 v2.8b, v2.8b, v2.8b +; SVE-NEXT: cmlt v1.4h, v1.4h, #0 +; SVE-NEXT: bif v0.8b, v2.8b, v1.8b +; SVE-NEXT: uzp1 v0.8b, v0.8b, v0.8b +; SVE-NEXT: str s0, [x0] +; SVE-NEXT: ret + %load = load <4 x i8>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i8> %x, <4 x i8> %load + store <4 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i16(<4 x i16> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v4i16: +; SVE: // %bb.0: +; SVE-NEXT: shl v1.4h, v1.4h, #15 +; SVE-NEXT: ptrue p0.h, vl4 +; SVE-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE-NEXT: cmlt v1.4h, v1.4h, #0 +; SVE-NEXT: cmpne p0.h, p0/z, z1.h, #0 +; SVE-NEXT: st1h { z0.h }, p0, [x0] +; SVE-NEXT: ret + %load = load <4 x i16>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i16> %x, <4 x i16> %load + store <4 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i32(<4 x i32> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v4i32: +; SVE: // %bb.0: +; SVE-NEXT: ushll v1.4s, v1.4h, #0 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: shl v1.4s, v1.4s, #31 +; SVE-NEXT: cmlt v1.4s, v1.4s, #0 +; SVE-NEXT: cmpne p0.s, p0/z, z1.s, #0 +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: ret + %load = load <4 x i32>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %load + store <4 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i64(<4 x i64> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v4i64: +; SVE: // %bb.0: +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: ptrue p0.d, vl2 +; SVE-NEXT: mov x8, #2 // =0x2 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: ushll2 v3.2d, v2.4s, #0 +; SVE-NEXT: ushll v2.2d, v2.2s, #0 +; SVE-NEXT: shl v3.2d, v3.2d, #63 +; 
SVE-NEXT: shl v2.2d, v2.2d, #63 +; SVE-NEXT: cmlt v3.2d, v3.2d, #0 +; SVE-NEXT: cmlt v2.2d, v2.2d, #0 +; SVE-NEXT: cmpne p1.d, p0/z, z3.d, #0 +; SVE-NEXT: cmpne p0.d, p0/z, z2.d, #0 +; SVE-NEXT: st1d { z1.d }, p1, [x0, x8, lsl #3] +; SVE-NEXT: st1d { z0.d }, p0, [x0] +; SVE-NEXT: ret + %load = load <4 x i64>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %load + store <4 x i64> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f16(<4 x half> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v4f16: +; SVE: // %bb.0: +; SVE-NEXT: shl v1.4h, v1.4h, #15 +; SVE-NEXT: ptrue p0.h, vl4 +; SVE-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE-NEXT: cmlt v1.4h, v1.4h, #0 +; SVE-NEXT: cmpne p0.h, p0/z, z1.h, #0 +; SVE-NEXT: st1h { z0.h }, p0, [x0] +; SVE-NEXT: ret + %load = load <4 x half>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x half> %x, <4 x half> %load + store <4 x half> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f32(<4 x float> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v4f32: +; SVE: // %bb.0: +; SVE-NEXT: ushll v1.4s, v1.4h, #0 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: shl v1.4s, v1.4s, #31 +; SVE-NEXT: cmlt v1.4s, v1.4s, #0 +; SVE-NEXT: cmpne p0.s, p0/z, z1.s, #0 +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: ret + %load = load <4 x float>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x float> %x, <4 x float> %load + store <4 x float> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f64(<4 x double> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v4f64: +; SVE: // %bb.0: +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: ptrue p0.d, vl2 +; SVE-NEXT: mov x8, #2 // =0x2 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: ushll2 v3.2d, v2.4s, #0 +; SVE-NEXT: ushll v2.2d, v2.2s, #0 +; SVE-NEXT: shl v3.2d, v3.2d, #63 +; SVE-NEXT: shl v2.2d, v2.2d, #63 +; SVE-NEXT: cmlt v3.2d, v3.2d, #0 +; SVE-NEXT: cmlt v2.2d, v2.2d, #0 +; SVE-NEXT: cmpne p1.d, p0/z, z3.d, #0 +; SVE-NEXT: cmpne p0.d, p0/z, z2.d, #0 +; SVE-NEXT: st1d { z1.d }, p1, [x0, x8, lsl #3] +; SVE-NEXT: st1d { z0.d }, p0, [x0] +; SVE-NEXT: ret + %load = load <4 x double>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x double> %x, <4 x double> %load + store <4 x double> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i8(<8 x i8> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v8i8: +; SVE: // %bb.0: +; SVE-NEXT: shl v1.8b, v1.8b, #7 +; SVE-NEXT: ptrue p0.b, vl8 +; SVE-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE-NEXT: cmlt v1.8b, v1.8b, #0 +; SVE-NEXT: cmpne p0.b, p0/z, z1.b, #0 +; SVE-NEXT: st1b { z0.b }, p0, [x0] +; SVE-NEXT: ret + %load = load <8 x i8>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i8> %x, <8 x i8> %load + store <8 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i16(<8 x i16> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v8i16: +; SVE: // %bb.0: +; SVE-NEXT: ushll v1.8h, v1.8b, #0 +; SVE-NEXT: ptrue p0.h, vl8 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: shl v1.8h, v1.8h, #15 +; SVE-NEXT: cmlt v1.8h, v1.8h, #0 +; SVE-NEXT: cmpne p0.h, p0/z, z1.h, #0 +; SVE-NEXT: st1h { z0.h }, p0, [x0] +; SVE-NEXT: ret + %load = load <8 x 
i16>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %load + store <8 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i32(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v8i32: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: zip2 v3.8b, v2.8b, v0.8b +; SVE-NEXT: zip1 v2.8b, v2.8b, v0.8b +; SVE-NEXT: mov x8, #4 // =0x4 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: ushll v3.4s, v3.4h, #0 +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: shl v3.4s, v3.4s, #31 +; SVE-NEXT: shl v2.4s, v2.4s, #31 +; SVE-NEXT: cmlt v3.4s, v3.4s, #0 +; SVE-NEXT: cmlt v2.4s, v2.4s, #0 +; SVE-NEXT: cmpne p1.s, p0/z, z3.s, #0 +; SVE-NEXT: cmpne p0.s, p0/z, z2.s, #0 +; SVE-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2] +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: ret + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i64(<8 x i64> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v8i64: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $d4 killed $d4 def $q4 +; SVE-NEXT: mov b5, v4.b[4] +; SVE-NEXT: mov b6, v4.b[6] +; SVE-NEXT: mov x8, #4 // =0x4 +; SVE-NEXT: mov b7, v4.b[2] +; SVE-NEXT: mov b16, v4.b[0] +; SVE-NEXT: // kill: def $q2 killed $q2 def $z2 +; SVE-NEXT: mov x9, #6 // =0x6 +; SVE-NEXT: ptrue p0.d, vl2 +; SVE-NEXT: // kill: def $q3 killed $q3 def $z3 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: mov v5.b[4], v4.b[5] +; SVE-NEXT: mov v6.b[4], v4.b[7] +; SVE-NEXT: mov v7.b[4], v4.b[3] +; SVE-NEXT: mov v16.b[4], v4.b[1] +; SVE-NEXT: ushll v4.2d, v5.2s, #0 +; SVE-NEXT: ushll v5.2d, v6.2s, #0 +; SVE-NEXT: ushll v6.2d, v7.2s, #0 +; SVE-NEXT: ushll v7.2d, v16.2s, #0 +; SVE-NEXT: shl v4.2d, v4.2d, #63 +; SVE-NEXT: shl v5.2d, v5.2d, #63 +; SVE-NEXT: shl v6.2d, v6.2d, #63 +; SVE-NEXT: shl v7.2d, v7.2d, #63 +; SVE-NEXT: cmlt v4.2d, v4.2d, #0 +; SVE-NEXT: cmlt v5.2d, v5.2d, #0 +; SVE-NEXT: cmlt v6.2d, v6.2d, #0 +; SVE-NEXT: cmpne p1.d, p0/z, z4.d, #0 +; SVE-NEXT: cmlt v4.2d, v7.2d, #0 +; SVE-NEXT: cmpne p2.d, p0/z, z5.d, #0 +; SVE-NEXT: cmpne p3.d, p0/z, z6.d, #0 +; SVE-NEXT: cmpne p0.d, p0/z, z4.d, #0 +; SVE-NEXT: st1d { z2.d }, p1, [x0, x8, lsl #3] +; SVE-NEXT: mov x8, #2 // =0x2 +; SVE-NEXT: st1d { z3.d }, p2, [x0, x9, lsl #3] +; SVE-NEXT: st1d { z1.d }, p3, [x0, x8, lsl #3] +; SVE-NEXT: st1d { z0.d }, p0, [x0] +; SVE-NEXT: ret + %load = load <8 x i64>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %load + store <8 x i64> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f16(<8 x half> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v8f16: +; SVE: // %bb.0: +; SVE-NEXT: ushll v1.8h, v1.8b, #0 +; SVE-NEXT: ptrue p0.h, vl8 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: shl v1.8h, v1.8h, #15 +; SVE-NEXT: cmlt v1.8h, v1.8h, #0 +; SVE-NEXT: cmpne p0.h, p0/z, z1.h, #0 +; SVE-NEXT: st1h { z0.h }, p0, [x0] +; SVE-NEXT: ret + %load = load <8 x half>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x half> %x, <8 x half> %load + store <8 x half> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f32(<8 x float> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: 
test_masked_store_success_v8f32: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: zip2 v3.8b, v2.8b, v0.8b +; SVE-NEXT: zip1 v2.8b, v2.8b, v0.8b +; SVE-NEXT: mov x8, #4 // =0x4 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: ushll v3.4s, v3.4h, #0 +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: shl v3.4s, v3.4s, #31 +; SVE-NEXT: shl v2.4s, v2.4s, #31 +; SVE-NEXT: cmlt v3.4s, v3.4s, #0 +; SVE-NEXT: cmlt v2.4s, v2.4s, #0 +; SVE-NEXT: cmpne p1.s, p0/z, z3.s, #0 +; SVE-NEXT: cmpne p0.s, p0/z, z2.s, #0 +; SVE-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2] +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: ret + %load = load <8 x float>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x float> %x, <8 x float> %load + store <8 x float> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f64(<8 x double> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v8f64: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $d4 killed $d4 def $q4 +; SVE-NEXT: mov b5, v4.b[4] +; SVE-NEXT: mov b6, v4.b[6] +; SVE-NEXT: mov x8, #4 // =0x4 +; SVE-NEXT: mov b7, v4.b[2] +; SVE-NEXT: mov b16, v4.b[0] +; SVE-NEXT: // kill: def $q2 killed $q2 def $z2 +; SVE-NEXT: mov x9, #6 // =0x6 +; SVE-NEXT: ptrue p0.d, vl2 +; SVE-NEXT: // kill: def $q3 killed $q3 def $z3 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: mov v5.b[4], v4.b[5] +; SVE-NEXT: mov v6.b[4], v4.b[7] +; SVE-NEXT: mov v7.b[4], v4.b[3] +; SVE-NEXT: mov v16.b[4], v4.b[1] +; SVE-NEXT: ushll v4.2d, v5.2s, #0 +; SVE-NEXT: ushll v5.2d, v6.2s, #0 +; SVE-NEXT: ushll v6.2d, v7.2s, #0 +; SVE-NEXT: ushll v7.2d, v16.2s, #0 +; SVE-NEXT: shl v4.2d, v4.2d, #63 +; SVE-NEXT: shl v5.2d, v5.2d, #63 +; SVE-NEXT: shl v6.2d, v6.2d, #63 +; SVE-NEXT: shl v7.2d, v7.2d, #63 +; SVE-NEXT: cmlt v4.2d, v4.2d, #0 +; SVE-NEXT: cmlt v5.2d, v5.2d, #0 +; SVE-NEXT: cmlt v6.2d, v6.2d, #0 +; SVE-NEXT: cmpne p1.d, p0/z, z4.d, #0 +; SVE-NEXT: cmlt v4.2d, v7.2d, #0 +; SVE-NEXT: cmpne p2.d, p0/z, z5.d, #0 +; SVE-NEXT: cmpne p3.d, p0/z, z6.d, #0 +; SVE-NEXT: cmpne p0.d, p0/z, z4.d, #0 +; SVE-NEXT: st1d { z2.d }, p1, [x0, x8, lsl #3] +; SVE-NEXT: mov x8, #2 // =0x2 +; SVE-NEXT: st1d { z3.d }, p2, [x0, x9, lsl #3] +; SVE-NEXT: st1d { z1.d }, p3, [x0, x8, lsl #3] +; SVE-NEXT: st1d { z0.d }, p0, [x0] +; SVE-NEXT: ret + %load = load <8 x double>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x double> %x, <8 x double> %load + store <8 x double> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i8(<16 x i8> %x, ptr %ptr, <16 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v16i8: +; SVE: // %bb.0: +; SVE-NEXT: shl v1.16b, v1.16b, #7 +; SVE-NEXT: ptrue p0.b, vl16 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: cmlt v1.16b, v1.16b, #0 +; SVE-NEXT: cmpne p0.b, p0/z, z1.b, #0 +; SVE-NEXT: st1b { z0.b }, p0, [x0] +; SVE-NEXT: ret + %load = load <16 x i8>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %load + store <16 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i16(<16 x i16> %x, ptr %ptr, <16 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v16i16: +; SVE: // %bb.0: +; SVE-NEXT: ushll2 v3.8h, v2.16b, #0 +; SVE-NEXT: ushll v2.8h, v2.8b, #0 +; SVE-NEXT: mov x8, #8 // =0x8 +; SVE-NEXT: ptrue p0.h, vl8 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def 
$z0 +; SVE-NEXT: shl v3.8h, v3.8h, #15 +; SVE-NEXT: shl v2.8h, v2.8h, #15 +; SVE-NEXT: cmlt v3.8h, v3.8h, #0 +; SVE-NEXT: cmlt v2.8h, v2.8h, #0 +; SVE-NEXT: cmpne p1.h, p0/z, z3.h, #0 +; SVE-NEXT: cmpne p0.h, p0/z, z2.h, #0 +; SVE-NEXT: st1h { z1.h }, p1, [x0, x8, lsl #1] +; SVE-NEXT: st1h { z0.h }, p0, [x0] +; SVE-NEXT: ret + %load = load <16 x i16>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %load + store <16 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i32(<16 x i32> %x, ptr %ptr, <16 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v16i32: +; SVE: // %bb.0: +; SVE-NEXT: ext v5.16b, v4.16b, v4.16b, #8 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: zip2 v6.8b, v4.8b, v0.8b +; SVE-NEXT: mov x8, #4 // =0x4 +; SVE-NEXT: zip1 v4.8b, v4.8b, v0.8b +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: mov x9, #8 // =0x8 +; SVE-NEXT: // kill: def $q3 killed $q3 def $z3 +; SVE-NEXT: // kill: def $q2 killed $q2 def $z2 +; SVE-NEXT: zip1 v7.8b, v5.8b, v0.8b +; SVE-NEXT: zip2 v5.8b, v5.8b, v0.8b +; SVE-NEXT: ushll v6.4s, v6.4h, #0 +; SVE-NEXT: ushll v4.4s, v4.4h, #0 +; SVE-NEXT: shl v6.4s, v6.4s, #31 +; SVE-NEXT: ushll v7.4s, v7.4h, #0 +; SVE-NEXT: ushll v5.4s, v5.4h, #0 +; SVE-NEXT: shl v4.4s, v4.4s, #31 +; SVE-NEXT: cmlt v6.4s, v6.4s, #0 +; SVE-NEXT: shl v7.4s, v7.4s, #31 +; SVE-NEXT: shl v5.4s, v5.4s, #31 +; SVE-NEXT: cmlt v4.4s, v4.4s, #0 +; SVE-NEXT: cmpne p1.s, p0/z, z6.s, #0 +; SVE-NEXT: cmlt v7.4s, v7.4s, #0 +; SVE-NEXT: cmlt v5.4s, v5.4s, #0 +; SVE-NEXT: cmpne p2.s, p0/z, z7.s, #0 +; SVE-NEXT: cmpne p3.s, p0/z, z5.s, #0 +; SVE-NEXT: cmpne p0.s, p0/z, z4.s, #0 +; SVE-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2] +; SVE-NEXT: mov x8, #12 // =0xc +; SVE-NEXT: st1w { z2.s }, p2, [x0, x9, lsl #2] +; SVE-NEXT: st1w { z3.s }, p3, [x0, x8, lsl #2] +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: ret + %load = load <16 x i32>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %load + store <16 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v32i8(<32 x i8> %x, ptr %ptr, <32 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v32i8: +; SVE: // %bb.0: +; SVE-NEXT: ldr w8, [sp, #72] +; SVE-NEXT: fmov s2, w1 +; SVE-NEXT: ldr w9, [sp, #80] +; SVE-NEXT: ptrue p0.b, vl16 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: fmov s3, w8 +; SVE-NEXT: ldr w8, [sp, #88] +; SVE-NEXT: mov v2.b[1], w2 +; SVE-NEXT: mov v3.b[1], w9 +; SVE-NEXT: ldr w9, [sp] +; SVE-NEXT: mov v2.b[2], w3 +; SVE-NEXT: mov v3.b[2], w8 +; SVE-NEXT: ldr w8, [sp, #96] +; SVE-NEXT: mov v2.b[3], w4 +; SVE-NEXT: mov v3.b[3], w8 +; SVE-NEXT: ldr w8, [sp, #104] +; SVE-NEXT: mov v2.b[4], w5 +; SVE-NEXT: mov v3.b[4], w8 +; SVE-NEXT: ldr w8, [sp, #112] +; SVE-NEXT: mov v2.b[5], w6 +; SVE-NEXT: mov v3.b[5], w8 +; SVE-NEXT: ldr w8, [sp, #120] +; SVE-NEXT: mov v2.b[6], w7 +; SVE-NEXT: mov v3.b[6], w8 +; SVE-NEXT: ldr w8, [sp, #128] +; SVE-NEXT: mov v2.b[7], w9 +; SVE-NEXT: ldr w9, [sp, #8] +; SVE-NEXT: mov v3.b[7], w8 +; SVE-NEXT: ldr w8, [sp, #136] +; SVE-NEXT: mov v2.b[8], w9 +; SVE-NEXT: ldr w9, [sp, #16] +; SVE-NEXT: mov v3.b[8], w8 +; SVE-NEXT: ldr w8, [sp, #144] +; SVE-NEXT: mov v2.b[9], w9 +; SVE-NEXT: ldr w9, [sp, #24] +; SVE-NEXT: mov v3.b[9], w8 +; SVE-NEXT: ldr w8, [sp, #152] +; SVE-NEXT: mov v2.b[10], w9 +; SVE-NEXT: ldr w9, [sp, #32] +; SVE-NEXT: mov v3.b[10], w8 +; SVE-NEXT: 
ldr w8, [sp, #160] +; SVE-NEXT: mov v2.b[11], w9 +; SVE-NEXT: ldr w9, [sp, #40] +; SVE-NEXT: mov v3.b[11], w8 +; SVE-NEXT: ldr w8, [sp, #168] +; SVE-NEXT: mov v2.b[12], w9 +; SVE-NEXT: ldr w9, [sp, #48] +; SVE-NEXT: mov v3.b[12], w8 +; SVE-NEXT: ldr w8, [sp, #176] +; SVE-NEXT: mov v2.b[13], w9 +; SVE-NEXT: ldr w9, [sp, #56] +; SVE-NEXT: mov v3.b[13], w8 +; SVE-NEXT: ldr w8, [sp, #184] +; SVE-NEXT: mov v2.b[14], w9 +; SVE-NEXT: ldr w9, [sp, #64] +; SVE-NEXT: mov v3.b[14], w8 +; SVE-NEXT: ldr w8, [sp, #192] +; SVE-NEXT: mov v2.b[15], w9 +; SVE-NEXT: mov v3.b[15], w8 +; SVE-NEXT: mov w8, #16 // =0x10 +; SVE-NEXT: shl v2.16b, v2.16b, #7 +; SVE-NEXT: shl v3.16b, v3.16b, #7 +; SVE-NEXT: cmlt v2.16b, v2.16b, #0 +; SVE-NEXT: cmlt v3.16b, v3.16b, #0 +; SVE-NEXT: cmpne p1.b, p0/z, z3.b, #0 +; SVE-NEXT: cmpne p0.b, p0/z, z2.b, #0 +; SVE-NEXT: st1b { z1.b }, p1, [x0, x8] +; SVE-NEXT: st1b { z0.b }, p0, [x0] +; SVE-NEXT: ret + %load = load <32 x i8>, ptr %ptr, align 32 + %sel = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %load + store <32 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v32i16(<32 x i16> %x, ptr %ptr, <32 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v32i16: +; SVE: // %bb.0: +; SVE-NEXT: ldr w9, [sp, #72] +; SVE-NEXT: ldr w11, [sp, #136] +; SVE-NEXT: fmov s7, w1 +; SVE-NEXT: ldr w8, [sp, #80] +; SVE-NEXT: ldr w10, [sp, #144] +; SVE-NEXT: ptrue p0.h, vl8 +; SVE-NEXT: fmov s4, w9 +; SVE-NEXT: ldr w9, [sp, #8] +; SVE-NEXT: fmov s5, w11 +; SVE-NEXT: mov v7.b[1], w2 +; SVE-NEXT: // kill: def $q2 killed $q2 def $z2 +; SVE-NEXT: // kill: def $q3 killed $q3 def $z3 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: fmov s6, w9 +; SVE-NEXT: ldr w9, [sp, #152] +; SVE-NEXT: mov v4.b[1], w8 +; SVE-NEXT: ldr w8, [sp, #16] +; SVE-NEXT: mov v5.b[1], w10 +; SVE-NEXT: mov v6.b[1], w8 +; SVE-NEXT: ldr w8, [sp, #88] +; SVE-NEXT: mov v7.b[2], w3 +; SVE-NEXT: mov v4.b[2], w8 +; SVE-NEXT: ldr w8, [sp, #24] +; SVE-NEXT: mov v5.b[2], w9 +; SVE-NEXT: ldr w9, [sp, #160] +; SVE-NEXT: mov v6.b[2], w8 +; SVE-NEXT: ldr w8, [sp, #96] +; SVE-NEXT: mov v7.b[3], w4 +; SVE-NEXT: mov v4.b[3], w8 +; SVE-NEXT: ldr w8, [sp, #32] +; SVE-NEXT: mov v5.b[3], w9 +; SVE-NEXT: ldr w9, [sp, #168] +; SVE-NEXT: mov v6.b[3], w8 +; SVE-NEXT: ldr w8, [sp, #104] +; SVE-NEXT: mov v7.b[4], w5 +; SVE-NEXT: mov v4.b[4], w8 +; SVE-NEXT: ldr w8, [sp, #40] +; SVE-NEXT: mov v5.b[4], w9 +; SVE-NEXT: ldr w9, [sp, #176] +; SVE-NEXT: mov v6.b[4], w8 +; SVE-NEXT: ldr w8, [sp, #112] +; SVE-NEXT: mov v7.b[5], w6 +; SVE-NEXT: mov v4.b[5], w8 +; SVE-NEXT: ldr w8, [sp, #48] +; SVE-NEXT: mov v5.b[5], w9 +; SVE-NEXT: ldr w9, [sp, #184] +; SVE-NEXT: mov v6.b[5], w8 +; SVE-NEXT: ldr w8, [sp, #120] +; SVE-NEXT: mov v7.b[6], w7 +; SVE-NEXT: mov v4.b[6], w8 +; SVE-NEXT: ldr w8, [sp, #56] +; SVE-NEXT: mov v5.b[6], w9 +; SVE-NEXT: ldr w9, [sp, #192] +; SVE-NEXT: mov v6.b[6], w8 +; SVE-NEXT: ldr w8, [sp, #128] +; SVE-NEXT: mov v4.b[7], w8 +; SVE-NEXT: ldr w8, [sp, #64] +; SVE-NEXT: mov v5.b[7], w9 +; SVE-NEXT: ldr w9, [sp] +; SVE-NEXT: mov v6.b[7], w8 +; SVE-NEXT: mov x8, #16 // =0x10 +; SVE-NEXT: mov v7.b[7], w9 +; SVE-NEXT: ushll v4.8h, v4.8b, #0 +; SVE-NEXT: ushll v5.8h, v5.8b, #0 +; SVE-NEXT: ushll v6.8h, v6.8b, #0 +; SVE-NEXT: ushll v7.8h, v7.8b, #0 +; SVE-NEXT: shl v4.8h, v4.8h, #15 +; SVE-NEXT: shl v5.8h, v5.8h, #15 +; SVE-NEXT: shl v6.8h, v6.8h, #15 +; SVE-NEXT: shl v7.8h, v7.8h, #15 +; SVE-NEXT: cmlt v4.8h, v4.8h, #0 +; SVE-NEXT: cmlt v5.8h, 
v5.8h, #0 +; SVE-NEXT: cmlt v6.8h, v6.8h, #0 +; SVE-NEXT: cmpne p1.h, p0/z, z4.h, #0 +; SVE-NEXT: cmlt v4.8h, v7.8h, #0 +; SVE-NEXT: cmpne p2.h, p0/z, z5.h, #0 +; SVE-NEXT: cmpne p3.h, p0/z, z6.h, #0 +; SVE-NEXT: cmpne p0.h, p0/z, z4.h, #0 +; SVE-NEXT: st1h { z2.h }, p1, [x0, x8, lsl #1] +; SVE-NEXT: mov x8, #24 // =0x18 +; SVE-NEXT: st1h { z3.h }, p2, [x0, x8, lsl #1] +; SVE-NEXT: mov x8, #8 // =0x8 +; SVE-NEXT: st1h { z1.h }, p3, [x0, x8, lsl #1] +; SVE-NEXT: st1h { z0.h }, p0, [x0] +; SVE-NEXT: ret + %load = load <32 x i16>, ptr %ptr, align 32 + %sel = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %load + store <32 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v64i8(<64 x i8> %x, ptr %ptr, <64 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_v64i8: +; SVE: // %bb.0: +; SVE-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; SVE-NEXT: .cfi_def_cfa_offset 16 +; SVE-NEXT: .cfi_offset w29, -16 +; SVE-NEXT: ldr w8, [sp, #216] +; SVE-NEXT: ldr w9, [sp, #344] +; SVE-NEXT: fmov s7, w1 +; SVE-NEXT: ldr w11, [sp, #88] +; SVE-NEXT: ldr w10, [sp, #224] +; SVE-NEXT: ptrue p0.b, vl16 +; SVE-NEXT: fmov s4, w8 +; SVE-NEXT: fmov s5, w9 +; SVE-NEXT: ldr w8, [sp, #352] +; SVE-NEXT: fmov s6, w11 +; SVE-NEXT: ldr w9, [sp, #96] +; SVE-NEXT: mov v7.b[1], w2 +; SVE-NEXT: // kill: def $q2 killed $q2 def $z2 +; SVE-NEXT: // kill: def $q3 killed $q3 def $z3 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: mov v4.b[1], w10 +; SVE-NEXT: mov v5.b[1], w8 +; SVE-NEXT: ldr w8, [sp, #232] +; SVE-NEXT: mov v6.b[1], w9 +; SVE-NEXT: ldr w9, [sp, #360] +; SVE-NEXT: ldr w10, [sp, #112] +; SVE-NEXT: mov v7.b[2], w3 +; SVE-NEXT: mov v4.b[2], w8 +; SVE-NEXT: ldr w8, [sp, #104] +; SVE-NEXT: mov v5.b[2], w9 +; SVE-NEXT: ldr w9, [sp, #368] +; SVE-NEXT: mov v6.b[2], w8 +; SVE-NEXT: ldr w8, [sp, #240] +; SVE-NEXT: mov v7.b[3], w4 +; SVE-NEXT: mov v4.b[3], w8 +; SVE-NEXT: mov v5.b[3], w9 +; SVE-NEXT: ldr w8, [sp, #248] +; SVE-NEXT: ldr w9, [sp, #376] +; SVE-NEXT: mov v6.b[3], w10 +; SVE-NEXT: ldr w10, [sp, #120] +; SVE-NEXT: mov v7.b[4], w5 +; SVE-NEXT: mov v4.b[4], w8 +; SVE-NEXT: mov v5.b[4], w9 +; SVE-NEXT: ldr w8, [sp, #256] +; SVE-NEXT: ldr w9, [sp, #384] +; SVE-NEXT: mov v6.b[4], w10 +; SVE-NEXT: ldr w10, [sp, #128] +; SVE-NEXT: mov v7.b[5], w6 +; SVE-NEXT: mov v4.b[5], w8 +; SVE-NEXT: mov v5.b[5], w9 +; SVE-NEXT: ldr w8, [sp, #264] +; SVE-NEXT: ldr w9, [sp, #392] +; SVE-NEXT: mov v6.b[5], w10 +; SVE-NEXT: ldr w10, [sp, #136] +; SVE-NEXT: mov v7.b[6], w7 +; SVE-NEXT: mov v4.b[6], w8 +; SVE-NEXT: mov v5.b[6], w9 +; SVE-NEXT: ldr w8, [sp, #272] +; SVE-NEXT: ldr w9, [sp, #400] +; SVE-NEXT: mov v6.b[6], w10 +; SVE-NEXT: ldr w10, [sp, #144] +; SVE-NEXT: mov v4.b[7], w8 +; SVE-NEXT: ldr w8, [sp, #16] +; SVE-NEXT: mov v5.b[7], w9 +; SVE-NEXT: ldr w9, [sp, #280] +; SVE-NEXT: mov v6.b[7], w10 +; SVE-NEXT: mov v7.b[7], w8 +; SVE-NEXT: ldr w10, [sp, #408] +; SVE-NEXT: ldr w8, [sp, #152] +; SVE-NEXT: mov v4.b[8], w9 +; SVE-NEXT: ldr w9, [sp, #24] +; SVE-NEXT: mov v5.b[8], w10 +; SVE-NEXT: ldr w10, [sp, #288] +; SVE-NEXT: mov v6.b[8], w8 +; SVE-NEXT: mov v7.b[8], w9 +; SVE-NEXT: ldr w8, [sp, #416] +; SVE-NEXT: ldr w9, [sp, #160] +; SVE-NEXT: mov v4.b[9], w10 +; SVE-NEXT: ldr w10, [sp, #32] +; SVE-NEXT: mov v5.b[9], w8 +; SVE-NEXT: ldr w8, [sp, #296] +; SVE-NEXT: mov v6.b[9], w9 +; SVE-NEXT: mov v7.b[9], w10 +; SVE-NEXT: ldr w9, [sp, #424] +; SVE-NEXT: ldr w10, [sp, #168] +; SVE-NEXT: mov v4.b[10], w8 +; SVE-NEXT: ldr w8, [sp, 
#40] +; SVE-NEXT: mov v5.b[10], w9 +; SVE-NEXT: ldr w9, [sp, #304] +; SVE-NEXT: mov v6.b[10], w10 +; SVE-NEXT: mov v7.b[10], w8 +; SVE-NEXT: ldr w10, [sp, #432] +; SVE-NEXT: ldr w8, [sp, #176] +; SVE-NEXT: mov v4.b[11], w9 +; SVE-NEXT: ldr w9, [sp, #48] +; SVE-NEXT: mov v5.b[11], w10 +; SVE-NEXT: ldr w10, [sp, #312] +; SVE-NEXT: mov v6.b[11], w8 +; SVE-NEXT: mov v7.b[11], w9 +; SVE-NEXT: ldr w8, [sp, #440] +; SVE-NEXT: ldr w9, [sp, #184] +; SVE-NEXT: mov v4.b[12], w10 +; SVE-NEXT: ldr w10, [sp, #56] +; SVE-NEXT: mov v5.b[12], w8 +; SVE-NEXT: ldr w8, [sp, #320] +; SVE-NEXT: mov v6.b[12], w9 +; SVE-NEXT: mov v7.b[12], w10 +; SVE-NEXT: ldr w9, [sp, #448] +; SVE-NEXT: ldr w10, [sp, #192] +; SVE-NEXT: mov v4.b[13], w8 +; SVE-NEXT: ldr w8, [sp, #64] +; SVE-NEXT: mov v5.b[13], w9 +; SVE-NEXT: ldr w9, [sp, #328] +; SVE-NEXT: mov v6.b[13], w10 +; SVE-NEXT: mov v7.b[13], w8 +; SVE-NEXT: ldr w10, [sp, #456] +; SVE-NEXT: ldr w8, [sp, #200] +; SVE-NEXT: mov v4.b[14], w9 +; SVE-NEXT: ldr w9, [sp, #72] +; SVE-NEXT: mov v5.b[14], w10 +; SVE-NEXT: ldr w10, [sp, #336] +; SVE-NEXT: mov v6.b[14], w8 +; SVE-NEXT: mov v7.b[14], w9 +; SVE-NEXT: ldr w8, [sp, #464] +; SVE-NEXT: ldr w9, [sp, #208] +; SVE-NEXT: mov v4.b[15], w10 +; SVE-NEXT: ldr w10, [sp, #80] +; SVE-NEXT: mov v5.b[15], w8 +; SVE-NEXT: mov w8, #32 // =0x20 +; SVE-NEXT: mov v6.b[15], w9 +; SVE-NEXT: mov v7.b[15], w10 +; SVE-NEXT: mov w9, #48 // =0x30 +; SVE-NEXT: shl v4.16b, v4.16b, #7 +; SVE-NEXT: shl v5.16b, v5.16b, #7 +; SVE-NEXT: shl v6.16b, v6.16b, #7 +; SVE-NEXT: shl v7.16b, v7.16b, #7 +; SVE-NEXT: cmlt v4.16b, v4.16b, #0 +; SVE-NEXT: cmlt v5.16b, v5.16b, #0 +; SVE-NEXT: cmlt v6.16b, v6.16b, #0 +; SVE-NEXT: cmpne p1.b, p0/z, z4.b, #0 +; SVE-NEXT: cmlt v4.16b, v7.16b, #0 +; SVE-NEXT: cmpne p2.b, p0/z, z5.b, #0 +; SVE-NEXT: cmpne p3.b, p0/z, z6.b, #0 +; SVE-NEXT: cmpne p0.b, p0/z, z4.b, #0 +; SVE-NEXT: st1b { z2.b }, p1, [x0, x8] +; SVE-NEXT: mov w8, #16 // =0x10 +; SVE-NEXT: st1b { z3.b }, p2, [x0, x9] +; SVE-NEXT: st1b { z1.b }, p3, [x0, x8] +; SVE-NEXT: st1b { z0.b }, p0, [x0] +; SVE-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; SVE-NEXT: ret + %load = load <64 x i8>, ptr %ptr, align 32 + %sel = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %load + store <64 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v4i32(<4 x i32> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_invert_mask_v4i32: +; SVE: // %bb.0: +; SVE-NEXT: movi v2.4h, #1 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: eor v1.8b, v1.8b, v2.8b +; SVE-NEXT: ushll v1.4s, v1.4h, #0 +; SVE-NEXT: shl v1.4s, v1.4s, #31 +; SVE-NEXT: cmlt v1.4s, v1.4s, #0 +; SVE-NEXT: cmpne p0.s, p0/z, z1.s, #0 +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: ret + %load = load <4 x i32>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> %x + store <4 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v8i32(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_invert_mask_v8i32: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: zip2 v3.8b, v2.8b, v0.8b +; SVE-NEXT: zip1 v2.8b, v2.8b, v0.8b +; SVE-NEXT: mov x8, #4 // =0x4 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: ushll v3.4s, v3.4h, #0 +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: shl v3.4s, v3.4s, #31 +; SVE-NEXT: shl v2.4s, v2.4s, #31 +; SVE-NEXT: 
cmpge p1.s, p0/z, z3.s, #0 +; SVE-NEXT: cmpge p0.s, p0/z, z2.s, #0 +; SVE-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2] +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: ret + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %load, <8 x i32> %x + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v16i32(<16 x i32> %x, ptr %ptr, <16 x i1> %mask) { +; SVE-LABEL: test_masked_store_success_invert_mask_v16i32: +; SVE: // %bb.0: +; SVE-NEXT: ext v5.16b, v4.16b, v4.16b, #8 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: zip2 v6.8b, v4.8b, v0.8b +; SVE-NEXT: mov x8, #4 // =0x4 +; SVE-NEXT: zip1 v4.8b, v4.8b, v0.8b +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q2 killed $q2 def $z2 +; SVE-NEXT: // kill: def $q3 killed $q3 def $z3 +; SVE-NEXT: zip1 v7.8b, v5.8b, v0.8b +; SVE-NEXT: zip2 v5.8b, v5.8b, v0.8b +; SVE-NEXT: ushll v6.4s, v6.4h, #0 +; SVE-NEXT: ushll v4.4s, v4.4h, #0 +; SVE-NEXT: shl v6.4s, v6.4s, #31 +; SVE-NEXT: ushll v7.4s, v7.4h, #0 +; SVE-NEXT: ushll v5.4s, v5.4h, #0 +; SVE-NEXT: shl v4.4s, v4.4s, #31 +; SVE-NEXT: cmpge p1.s, p0/z, z6.s, #0 +; SVE-NEXT: shl v7.4s, v7.4s, #31 +; SVE-NEXT: shl v5.4s, v5.4s, #31 +; SVE-NEXT: cmpge p2.s, p0/z, z7.s, #0 +; SVE-NEXT: cmpge p3.s, p0/z, z5.s, #0 +; SVE-NEXT: cmpge p0.s, p0/z, z4.s, #0 +; SVE-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2] +; SVE-NEXT: mov x8, #8 // =0x8 +; SVE-NEXT: st1w { z2.s }, p2, [x0, x8, lsl #2] +; SVE-NEXT: mov x8, #12 // =0xc +; SVE-NEXT: st1w { z3.s }, p3, [x0, x8, lsl #2] +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: ret + %load = load <16 x i32>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i32> %load, <16 x i32> %x + store <16 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_zextload(<4 x i64> %x, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_zextload: +; SVE: // %bb.0: +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: ldr q4, [x0] +; SVE-NEXT: ushll2 v5.2d, v4.4s, #0 +; SVE-NEXT: ushll v4.2d, v4.2s, #0 +; SVE-NEXT: ushll2 v3.2d, v2.4s, #0 +; SVE-NEXT: ushll v2.2d, v2.2s, #0 +; SVE-NEXT: shl v3.2d, v3.2d, #63 +; SVE-NEXT: shl v2.2d, v2.2d, #63 +; SVE-NEXT: cmlt v3.2d, v3.2d, #0 +; SVE-NEXT: cmlt v2.2d, v2.2d, #0 +; SVE-NEXT: bif v1.16b, v5.16b, v3.16b +; SVE-NEXT: bif v0.16b, v4.16b, v2.16b +; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: ret + %load = load <4 x i32>, ptr %ptr, align 32 + %zext = zext <4 x i32> %load to <4 x i64> + %masked = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %zext + store <4 x i64> %masked, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_volatile_load(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_volatile_load: +; SVE: // %bb.0: +; SVE-NEXT: zip1 v3.8b, v2.8b, v0.8b +; SVE-NEXT: zip2 v2.8b, v2.8b, v0.8b +; SVE-NEXT: ldr q4, [x0] +; SVE-NEXT: ldr q5, [x0, #16] +; SVE-NEXT: ushll v3.4s, v3.4h, #0 +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: shl v3.4s, v3.4s, #31 +; SVE-NEXT: shl v2.4s, v2.4s, #31 +; SVE-NEXT: cmlt v3.4s, v3.4s, #0 +; SVE-NEXT: cmlt v2.4s, v2.4s, #0 +; SVE-NEXT: bif v0.16b, v4.16b, v3.16b +; SVE-NEXT: bif v1.16b, v5.16b, v2.16b +; SVE-NEXT: stp q0, q1, [x0] +; SVE-NEXT: ret + %load = load volatile <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_volatile_store(<8 x i32> %x, ptr 
%ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_volatile_store: +; SVE: // %bb.0: +; SVE-NEXT: zip1 v3.8b, v2.8b, v0.8b +; SVE-NEXT: zip2 v2.8b, v2.8b, v0.8b +; SVE-NEXT: ldp q4, q5, [x0] +; SVE-NEXT: ushll v3.4s, v3.4h, #0 +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: shl v3.4s, v3.4s, #31 +; SVE-NEXT: shl v2.4s, v2.4s, #31 +; SVE-NEXT: cmlt v3.4s, v3.4s, #0 +; SVE-NEXT: cmlt v2.4s, v2.4s, #0 +; SVE-NEXT: bif v0.16b, v4.16b, v3.16b +; SVE-NEXT: bif v1.16b, v5.16b, v2.16b +; SVE-NEXT: str q0, [x0] +; SVE-NEXT: str q1, [x0, #16] +; SVE-NEXT: ret + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store volatile <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +declare void @use_vec(<8 x i32>) + +define void @test_masked_store_intervening(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) nounwind { +; SVE-LABEL: test_masked_store_intervening: +; SVE: // %bb.0: +; SVE-NEXT: sub sp, sp, #96 +; SVE-NEXT: stp q1, q0, [sp, #32] // 32-byte Folded Spill +; SVE-NEXT: ldp q1, q3, [x0] +; SVE-NEXT: movi v0.2d, #0000000000000000 +; SVE-NEXT: str d8, [sp, #64] // 8-byte Folded Spill +; SVE-NEXT: fmov d8, d2 +; SVE-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill +; SVE-NEXT: mov x19, x0 +; SVE-NEXT: stp q1, q3, [sp] // 32-byte Folded Spill +; SVE-NEXT: movi v1.2d, #0000000000000000 +; SVE-NEXT: stp q0, q0, [x0] +; SVE-NEXT: bl use_vec +; SVE-NEXT: zip2 v0.8b, v8.8b, v0.8b +; SVE-NEXT: ldp q3, q2, [sp, #16] // 32-byte Folded Reload +; SVE-NEXT: zip1 v1.8b, v8.8b, v0.8b +; SVE-NEXT: ushll v0.4s, v0.4h, #0 +; SVE-NEXT: ldr d8, [sp, #64] // 8-byte Folded Reload +; SVE-NEXT: shl v0.4s, v0.4s, #31 +; SVE-NEXT: ushll v1.4s, v1.4h, #0 +; SVE-NEXT: cmlt v0.4s, v0.4s, #0 +; SVE-NEXT: shl v1.4s, v1.4s, #31 +; SVE-NEXT: bsl v0.16b, v2.16b, v3.16b +; SVE-NEXT: ldr q2, [sp, #48] // 16-byte Folded Reload +; SVE-NEXT: ldr q3, [sp] // 16-byte Folded Reload +; SVE-NEXT: cmlt v1.4s, v1.4s, #0 +; SVE-NEXT: bsl v1.16b, v2.16b, v3.16b +; SVE-NEXT: stp q1, q0, [x19] +; SVE-NEXT: ldp x30, x19, [sp, #80] // 16-byte Folded Reload +; SVE-NEXT: add sp, sp, #96 +; SVE-NEXT: ret + %load = load <8 x i32>, ptr %ptr, align 32 + store <8 x i32> zeroinitializer, ptr %ptr, align 32 + %tmp = load <8 x i32>, ptr %ptr + call void @use_vec(<8 x i32> %tmp) + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + + +define void @test_masked_store_multiple_v8i32(<8 x i32> %x, <8 x i32> %y, ptr %ptr1, ptr %ptr2, <8 x i1> %mask, <8 x i1> %mask2) { +; SVE-LABEL: test_masked_store_multiple_v8i32: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: zip2 v6.8b, v4.8b, v0.8b +; SVE-NEXT: zip1 v4.8b, v4.8b, v0.8b +; SVE-NEXT: mov x8, #4 // =0x4 +; SVE-NEXT: zip1 v7.8b, v5.8b, v0.8b +; SVE-NEXT: zip2 v5.8b, v5.8b, v0.8b +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: ushll v6.4s, v6.4h, #0 +; SVE-NEXT: ushll v4.4s, v4.4h, #0 +; SVE-NEXT: ushll v7.4s, v7.4h, #0 +; SVE-NEXT: ushll v5.4s, v5.4h, #0 +; SVE-NEXT: shl v6.4s, v6.4s, #31 +; SVE-NEXT: shl v4.4s, v4.4s, #31 +; SVE-NEXT: shl v7.4s, v7.4s, #31 +; SVE-NEXT: shl v5.4s, v5.4s, #31 +; SVE-NEXT: cmlt v6.4s, v6.4s, #0 +; SVE-NEXT: cmlt v4.4s, v4.4s, #0 +; SVE-NEXT: cmlt v7.4s, v7.4s, #0 +; SVE-NEXT: cmlt v5.4s, v5.4s, #0 +; SVE-NEXT: cmpne p1.s, p0/z, z6.s, #0 +; SVE-NEXT: ldp q6, q16, [x1] +; SVE-NEXT: cmpne p0.s, p0/z, z4.s, #0 +; SVE-NEXT: bif v2.16b, v6.16b, v7.16b +; SVE-NEXT: bif v3.16b, v16.16b, v5.16b 
+; SVE-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2] +; SVE-NEXT: st1w { z0.s }, p0, [x0] +; SVE-NEXT: stp q2, q3, [x1] +; SVE-NEXT: ret + %load = load <8 x i32>, ptr %ptr1, align 32 + %load2 = load <8 x i32>, ptr %ptr2, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + %sel2 = select <8 x i1> %mask2, <8 x i32> %y, <8 x i32> %load2 + store <8 x i32> %sel, ptr %ptr1, align 32 + store <8 x i32> %sel2, ptr %ptr2, align 32 + ret void +} + +define void @test_masked_store_multiple_v8i64(<8 x i64> %x, <8 x i64> %y, ptr %ptr1, ptr %ptr2, <8 x i1> %mask, <8 x i1> %mask2) { +; SVE-LABEL: test_masked_store_multiple_v8i64: +; SVE: // %bb.0: +; SVE-NEXT: ldp d16, d18, [sp] +; SVE-NEXT: ptrue p0.d, vl2 +; SVE-NEXT: // kill: def $q3 killed $q3 def $z3 +; SVE-NEXT: // kill: def $q2 killed $q2 def $z2 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: mov x8, #6 // =0x6 +; SVE-NEXT: mov x9, #4 // =0x4 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: mov b17, v16.b[4] +; SVE-NEXT: mov b19, v16.b[2] +; SVE-NEXT: mov b20, v16.b[6] +; SVE-NEXT: mov b21, v16.b[0] +; SVE-NEXT: mov b22, v18.b[4] +; SVE-NEXT: mov b23, v18.b[6] +; SVE-NEXT: mov b24, v18.b[0] +; SVE-NEXT: mov b25, v18.b[2] +; SVE-NEXT: mov v17.b[4], v16.b[5] +; SVE-NEXT: mov v19.b[4], v16.b[3] +; SVE-NEXT: mov v20.b[4], v16.b[7] +; SVE-NEXT: mov v21.b[4], v16.b[1] +; SVE-NEXT: mov v22.b[4], v18.b[5] +; SVE-NEXT: mov v23.b[4], v18.b[7] +; SVE-NEXT: mov v24.b[4], v18.b[1] +; SVE-NEXT: mov v25.b[4], v18.b[3] +; SVE-NEXT: ushll v17.2d, v17.2s, #0 +; SVE-NEXT: ushll v18.2d, v21.2s, #0 +; SVE-NEXT: ushll v21.2d, v24.2s, #0 +; SVE-NEXT: shl v16.2d, v17.2d, #63 +; SVE-NEXT: ushll v17.2d, v19.2s, #0 +; SVE-NEXT: ushll v19.2d, v20.2s, #0 +; SVE-NEXT: ushll v20.2d, v22.2s, #0 +; SVE-NEXT: shl v18.2d, v18.2d, #63 +; SVE-NEXT: ushll v22.2d, v25.2s, #0 +; SVE-NEXT: shl v21.2d, v21.2d, #63 +; SVE-NEXT: cmlt v16.2d, v16.2d, #0 +; SVE-NEXT: shl v17.2d, v17.2d, #63 +; SVE-NEXT: shl v19.2d, v19.2d, #63 +; SVE-NEXT: shl v20.2d, v20.2d, #63 +; SVE-NEXT: cmlt v18.2d, v18.2d, #0 +; SVE-NEXT: shl v22.2d, v22.2d, #63 +; SVE-NEXT: cmlt v21.2d, v21.2d, #0 +; SVE-NEXT: cmpne p1.d, p0/z, z16.d, #0 +; SVE-NEXT: ushll v16.2d, v23.2s, #0 +; SVE-NEXT: cmlt v17.2d, v17.2d, #0 +; SVE-NEXT: cmlt v19.2d, v19.2d, #0 +; SVE-NEXT: cmlt v20.2d, v20.2d, #0 +; SVE-NEXT: shl v16.2d, v16.2d, #63 +; SVE-NEXT: cmpne p2.d, p0/z, z17.d, #0 +; SVE-NEXT: cmpne p3.d, p0/z, z19.d, #0 +; SVE-NEXT: ldp q17, q19, [x1, #32] +; SVE-NEXT: cmpne p0.d, p0/z, z18.d, #0 +; SVE-NEXT: cmlt v16.2d, v16.2d, #0 +; SVE-NEXT: bif v6.16b, v17.16b, v20.16b +; SVE-NEXT: cmlt v20.2d, v22.2d, #0 +; SVE-NEXT: ldp q17, q18, [x1] +; SVE-NEXT: st1d { z2.d }, p1, [x0, x9, lsl #3] +; SVE-NEXT: mov v2.16b, v16.16b +; SVE-NEXT: st1d { z3.d }, p3, [x0, x8, lsl #3] +; SVE-NEXT: mov v3.16b, v21.16b +; SVE-NEXT: st1d { z0.d }, p0, [x0] +; SVE-NEXT: mov v0.16b, v20.16b +; SVE-NEXT: mov x9, #2 // =0x2 +; SVE-NEXT: st1d { z1.d }, p2, [x0, x9, lsl #3] +; SVE-NEXT: bsl v2.16b, v7.16b, v19.16b +; SVE-NEXT: bsl v3.16b, v4.16b, v17.16b +; SVE-NEXT: bsl v0.16b, v5.16b, v18.16b +; SVE-NEXT: stp q6, q2, [x1, #32] +; SVE-NEXT: stp q3, q0, [x1] +; SVE-NEXT: ret + %load = load <8 x i64>, ptr %ptr1, align 32 + %load2 = load <8 x i64>, ptr %ptr2, align 32 + %sel = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %load + %sel2 = select <8 x i1> %mask2, <8 x i64> %y, <8 x i64> %load2 + store <8 x i64> %sel, ptr %ptr1, align 32 + store <8 x i64> %sel2, ptr %ptr2, align 32 + ret void +} + +define void 
@test_masked_store_unaligned_v4i32(<4 x i32> %data, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_unaligned_v4i32: +; SVE: // %bb.0: +; SVE-NEXT: ushll v1.4s, v1.4h, #0 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: add x8, x0, #1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: shl v1.4s, v1.4s, #31 +; SVE-NEXT: cmlt v1.4s, v1.4s, #0 +; SVE-NEXT: cmpne p0.s, p0/z, z1.s, #0 +; SVE-NEXT: st1w { z0.s }, p0, [x8] +; SVE-NEXT: ret + %ptr_i8 = getelementptr i8, ptr %ptr, i32 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <4 x i32>, ptr %ptr_vec, align 1 + %sel = select <4 x i1> %mask, <4 x i32> %data, <4 x i32> %load + store <4 x i32> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v4i64(<4 x i64> %data, ptr %ptr, <4 x i1> %mask) { +; SVE-LABEL: test_masked_store_unaligned_v4i64: +; SVE: // %bb.0: +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: ptrue p0.d, vl2 +; SVE-NEXT: add x8, x0, #17 +; SVE-NEXT: add x9, x0, #1 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: ushll2 v3.2d, v2.4s, #0 +; SVE-NEXT: ushll v2.2d, v2.2s, #0 +; SVE-NEXT: shl v3.2d, v3.2d, #63 +; SVE-NEXT: shl v2.2d, v2.2d, #63 +; SVE-NEXT: cmlt v3.2d, v3.2d, #0 +; SVE-NEXT: cmlt v2.2d, v2.2d, #0 +; SVE-NEXT: cmpne p1.d, p0/z, z3.d, #0 +; SVE-NEXT: cmpne p0.d, p0/z, z2.d, #0 +; SVE-NEXT: st1d { z1.d }, p1, [x8] +; SVE-NEXT: st1d { z0.d }, p0, [x9] +; SVE-NEXT: ret + %ptr_i8 = getelementptr i8, ptr %ptr, i64 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <4 x i64>, ptr %ptr_vec, align 1 + %sel = select <4 x i1> %mask, <4 x i64> %data, <4 x i64> %load + store <4 x i64> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v8i32(<8 x i32> %data, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_unaligned_v8i32: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: zip1 v3.8b, v2.8b, v0.8b +; SVE-NEXT: zip2 v2.8b, v2.8b, v0.8b +; SVE-NEXT: add x8, x0, #1 +; SVE-NEXT: ptrue p0.s, vl4 +; SVE-NEXT: add x9, x0, #17 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: ushll v3.4s, v3.4h, #0 +; SVE-NEXT: ushll v2.4s, v2.4h, #0 +; SVE-NEXT: shl v3.4s, v3.4s, #31 +; SVE-NEXT: shl v2.4s, v2.4s, #31 +; SVE-NEXT: cmlt v3.4s, v3.4s, #0 +; SVE-NEXT: cmlt v2.4s, v2.4s, #0 +; SVE-NEXT: cmpne p1.s, p0/z, z3.s, #0 +; SVE-NEXT: cmpne p0.s, p0/z, z2.s, #0 +; SVE-NEXT: st1w { z0.s }, p1, [x8] +; SVE-NEXT: st1w { z1.s }, p0, [x9] +; SVE-NEXT: ret + %ptr_i8 = getelementptr i8, ptr %ptr, i32 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <8 x i32>, ptr %ptr_vec, align 1 + %sel = select <8 x i1> %mask, <8 x i32> %data, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v8i64(<8 x i64> %data, ptr %ptr, <8 x i1> %mask) { +; SVE-LABEL: test_masked_store_unaligned_v8i64: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $d4 killed $d4 def $q4 +; SVE-NEXT: mov b5, v4.b[4] +; SVE-NEXT: mov b6, v4.b[6] +; SVE-NEXT: add x8, x0, #33 +; SVE-NEXT: mov b7, v4.b[0] +; SVE-NEXT: mov b16, v4.b[2] +; SVE-NEXT: add x9, x0, #49 +; SVE-NEXT: ptrue p0.d, vl2 +; SVE-NEXT: // kill: def $q3 killed $q3 def $z3 +; SVE-NEXT: // kill: def $q2 killed $q2 def $z2 +; SVE-NEXT: // kill: def $q1 killed $q1 def $z1 +; SVE-NEXT: // kill: def $q0 killed $q0 def $z0 +; SVE-NEXT: mov v5.b[4], v4.b[5] +; SVE-NEXT: mov v6.b[4], v4.b[7] +; SVE-NEXT: mov v7.b[4], v4.b[1] +; SVE-NEXT: mov v16.b[4], v4.b[3] +; 
SVE-NEXT: ushll v4.2d, v5.2s, #0 +; SVE-NEXT: ushll v5.2d, v6.2s, #0 +; SVE-NEXT: ushll v6.2d, v7.2s, #0 +; SVE-NEXT: ushll v7.2d, v16.2s, #0 +; SVE-NEXT: shl v4.2d, v4.2d, #63 +; SVE-NEXT: shl v5.2d, v5.2d, #63 +; SVE-NEXT: shl v6.2d, v6.2d, #63 +; SVE-NEXT: shl v7.2d, v7.2d, #63 +; SVE-NEXT: cmlt v4.2d, v4.2d, #0 +; SVE-NEXT: cmlt v5.2d, v5.2d, #0 +; SVE-NEXT: cmlt v6.2d, v6.2d, #0 +; SVE-NEXT: cmpne p1.d, p0/z, z4.d, #0 +; SVE-NEXT: cmlt v4.2d, v7.2d, #0 +; SVE-NEXT: cmpne p2.d, p0/z, z5.d, #0 +; SVE-NEXT: cmpne p3.d, p0/z, z6.d, #0 +; SVE-NEXT: cmpne p0.d, p0/z, z4.d, #0 +; SVE-NEXT: st1d { z2.d }, p1, [x8] +; SVE-NEXT: add x8, x0, #1 +; SVE-NEXT: st1d { z3.d }, p2, [x9] +; SVE-NEXT: add x9, x0, #17 +; SVE-NEXT: st1d { z0.d }, p3, [x8] +; SVE-NEXT: st1d { z1.d }, p0, [x9] +; SVE-NEXT: ret + %ptr_i8 = getelementptr i8, ptr %ptr, i64 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <8 x i64>, ptr %ptr_vec, align 1 + %sel = select <8 x i1> %mask, <8 x i64> %data, <8 x i64> %load + store <8 x i64> %sel, ptr %ptr_vec, align 1 + ret void +} diff --git a/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll b/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll new file mode 100644 index 0000000..5036be9 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll @@ -0,0 +1,112 @@ +; RUN: llc -debug-only=isel -o /dev/null < %s 2>&1 | FileCheck %s + +; REQUIRES: asserts + +; These tests ensure that we don't combine +; CSEL a, b, cc, SUBS(SUB(x,y), 0) -> CSEL a, b, cc, SUBS(x,y) +; if the flags set by SUBS(SUB(x,y), 0) have more than one use. +; +; This restriction exists because combining SUBS(SUB(x,y), 0) -> SUBS(x,y) is +; only valid if there are no users of the overflow flags (C/V) generated by the +; SUBS. Currently, we only check the flags used by the CSEL, and therefore we +; conservatively reject cases where the SUBS's flags have other uses. 
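(The combine described above is exercised by the three functions in this new test file; as a minimal sketch, with operands and types mirroring the combine_subs test below, the generic IR input is:

    %sub = sub i32 %a, %b
    %cc = icmp ne i32 %sub, 0
    %sel = select i1 %cc, i32 %a, i32 %b

which, per the Legalized-DAG CHECK lines, becomes an AArch64ISD::SUBS of the sub against 0 feeding an AArch64ISD::CSEL; the combine then attempts to rewrite SUBS(SUB(x,y), 0) as SUBS(x,y) when the SUBS flags have no other users.)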
+ +target triple = "aarch64-unknown-linux-gnu" + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'combine_subs:' +; CHECK-NEXT: SelectionDAG has 13 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t5: i32 = sub t2, t4 +; CHECK-NEXT: t14: i32,i32 = AArch64ISD::SUBS t5, Constant:i32<0> +; CHECK-NEXT: t16: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t14:1 +; CHECK-NEXT: t11: ch,glue = CopyToReg t0, Register:i32 $w0, t16 +; CHECK-NEXT: t12: ch = AArch64ISD::RET_GLUE t11, Register:i32 $w0, t11:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'combine_subs:' +; CHECK-NEXT: SelectionDAG has 11 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t18: i32,i32 = AArch64ISD::SUBS t2, t4 +; CHECK-NEXT: t16: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t18:1 +; CHECK-NEXT: t11: ch,glue = CopyToReg t0, Register:i32 $w0, t16 +; CHECK-NEXT: t12: ch = AArch64ISD::RET_GLUE t11, Register:i32 $w0, t11:1 + +define i32 @combine_subs(i32 %a, i32 %b) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + ret i32 %sel +} + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'combine_subs_multiple_sub_uses:' +; CHECK-NEXT: SelectionDAG has 14 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t5: i32 = sub t2, t4 +; CHECK-NEXT: t15: i32,i32 = AArch64ISD::SUBS t5, Constant:i32<0> +; CHECK-NEXT: t17: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t15:1 +; CHECK-NEXT: t10: i32 = add t17, t5 +; CHECK-NEXT: t12: ch,glue = CopyToReg t0, Register:i32 $w0, t10 +; CHECK-NEXT: t13: ch = AArch64ISD::RET_GLUE t12, Register:i32 $w0, t12:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'combine_subs_multiple_sub_uses:' +; CHECK-NEXT: SelectionDAG has 12 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t17: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t19:1 +; CHECK-NEXT: t10: i32 = add t17, t19 +; CHECK-NEXT: t12: ch,glue = CopyToReg t0, Register:i32 $w0, t10 +; CHECK-NEXT: t19: i32,i32 = AArch64ISD::SUBS t2, t4 +; CHECK-NEXT: t13: ch = AArch64ISD::RET_GLUE t12, Register:i32 $w0, t12:1 + +define i32 @combine_subs_multiple_sub_uses(i32 %a, i32 %b) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + %add = add i32 %sel, %sub + ret i32 %add +} + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'do_not_combine_subs_multiple_flag_uses:' +; CHECK-NEXT: SelectionDAG has 19 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t24: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t21:1 +; CHECK-NEXT: t6: i32,ch = CopyFromReg t0, Register:i32 %2 +; CHECK-NEXT: t8: i32,ch = CopyFromReg t0, Register:i32 %3 +; CHECK-NEXT: t23: i32 = AArch64ISD::CSEL t6, t8, Constant:i32<1>, t21:1 +; CHECK-NEXT: t15: i32 = add t24, t23 +; CHECK-NEXT: t17: ch,glue = CopyToReg t0, Register:i32 $w0, t15 +; CHECK-NEXT: t9: i32 = sub t2, t4 +; CHECK-NEXT: t21: i32,i32 = AArch64ISD::SUBS t9, Constant:i32<0> +; 
CHECK-NEXT: t18: ch = AArch64ISD::RET_GLUE t17, Register:i32 $w0, t17:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'do_not_combine_subs_multiple_flag_uses:' +; CHECK-NEXT: SelectionDAG has 19 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t24: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t21:1 +; CHECK-NEXT: t6: i32,ch = CopyFromReg t0, Register:i32 %2 +; CHECK-NEXT: t8: i32,ch = CopyFromReg t0, Register:i32 %3 +; CHECK-NEXT: t23: i32 = AArch64ISD::CSEL t6, t8, Constant:i32<1>, t21:1 +; CHECK-NEXT: t15: i32 = add t24, t23 +; CHECK-NEXT: t17: ch,glue = CopyToReg t0, Register:i32 $w0, t15 +; CHECK-NEXT: t9: i32 = sub t2, t4 +; CHECK-NEXT: t21: i32,i32 = AArch64ISD::SUBS t9, Constant:i32<0> +; CHECK-NEXT: t18: ch = AArch64ISD::RET_GLUE t17, Register:i32 $w0, t17:1 + +define i32 @do_not_combine_subs_multiple_flag_uses(i32 %a, i32 %b, i32 %c, i32 %d) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + %other = select i1 %cc, i32 %c, i32 %d + %add = add i32 %sel, %other + ret i32 %add +} diff --git a/llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll b/llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll index 1f1bfe6..6df8d2b 100644 --- a/llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll +++ b/llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll @@ -1,20 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=arm64-apple-ios -mattr=+sve -o - %s | FileCheck %s -; RUN: llc -mtriple=aarch64_be-unknown-linux -mattr=+sve -o - %s | FileCheck --check-prefix=CHECK-BE %s -; RUN: llc -mtriple=arm64-apple-ios -mattr=+global-isel -mattr=+sve -o - %s | FileCheck %s -; RUN: llc -mtriple=aarch64_be-unknown-linux -mattr=+global-isel -mattr=+sve -o - %s | FileCheck --check-prefix=CHECK-BE %s +; RUN: llc -mtriple=arm64-apple-ios -mattr=+sve -o - %s | FileCheck %s --check-prefix=CHECK-LE-SD +; RUN: llc -mtriple=aarch64_be-unknown-linux -mattr=+sve -o - %s | FileCheck %s --check-prefix=CHECK-BE +; RUN: llc -mtriple=arm64-apple-ios -global-isel -mattr=+sve -o - %s | FileCheck %s --check-prefix=CHECK-LE-GI define void @zext_of_concat(ptr %a, ptr %b, ptr %c, ptr %d) nounwind { -; CHECK-LABEL: zext_of_concat: -; CHECK: ; %bb.0: -; CHECK-NEXT: ldr d0, [x0] -; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: add.2s v0, v0, v1 -; CHECK-NEXT: ldr q1, [x2] -; CHECK-NEXT: ushll.2d v0, v0, #0 -; CHECK-NEXT: add.4s v0, v0, v1 -; CHECK-NEXT: str q0, [x2] -; CHECK-NEXT: ret +; CHECK-LE-SD-LABEL: zext_of_concat: +; CHECK-LE-SD: ; %bb.0: +; CHECK-LE-SD-NEXT: ldr d0, [x0] +; CHECK-LE-SD-NEXT: ldr d1, [x1] +; CHECK-LE-SD-NEXT: add.2s v0, v0, v1 +; CHECK-LE-SD-NEXT: ldr q1, [x2] +; CHECK-LE-SD-NEXT: ushll.2d v0, v0, #0 +; CHECK-LE-SD-NEXT: add.4s v0, v0, v1 +; CHECK-LE-SD-NEXT: str q0, [x2] +; CHECK-LE-SD-NEXT: ret ; ; CHECK-BE-LABEL: zext_of_concat: ; CHECK-BE: // %bb.0: @@ -28,6 +27,23 @@ define void @zext_of_concat(ptr %a, ptr %b, ptr %c, ptr %d) nounwind { ; CHECK-BE-NEXT: add v0.4s, v0.4s, v1.4s ; CHECK-BE-NEXT: st1 { v0.4s }, [x2] ; CHECK-BE-NEXT: ret +; +; CHECK-LE-GI-LABEL: zext_of_concat: +; CHECK-LE-GI: ; %bb.0: +; CHECK-LE-GI-NEXT: ldr d0, [x0] +; CHECK-LE-GI-NEXT: ldr d1, [x1] +; CHECK-LE-GI-NEXT: movi.2d v3, #0000000000000000 +; CHECK-LE-GI-NEXT: Lloh0: +; CHECK-LE-GI-NEXT: adrp x8, lCPI0_0@PAGE +; CHECK-LE-GI-NEXT: add.2s v2, v0, v1 +; 
CHECK-LE-GI-NEXT: Lloh1: +; CHECK-LE-GI-NEXT: ldr q0, [x8, lCPI0_0@PAGEOFF] +; CHECK-LE-GI-NEXT: ldr q1, [x2] +; CHECK-LE-GI-NEXT: tbl.16b v0, { v2, v3 }, v0 +; CHECK-LE-GI-NEXT: add.4s v0, v0, v1 +; CHECK-LE-GI-NEXT: str q0, [x2] +; CHECK-LE-GI-NEXT: ret +; CHECK-LE-GI-NEXT: .loh AdrpLdr Lloh0, Lloh1 %i0.a = load <2 x i32>, ptr %a %i0.b = load <2 x i32>, ptr %b %i0 = add <2 x i32> %i0.a, %i0.b @@ -40,19 +56,19 @@ define void @zext_of_concat(ptr %a, ptr %b, ptr %c, ptr %d) nounwind { } define void @zext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nounwind { -; CHECK-LABEL: zext_of_concat_extrause: -; CHECK: ; %bb.0: -; CHECK-NEXT: ldr d0, [x1] -; CHECK-NEXT: ldr d1, [x0] -; CHECK-NEXT: add.2s v0, v1, v0 -; CHECK-NEXT: movi.2d v1, #0000000000000000 -; CHECK-NEXT: mov.d v0[1], v0[0] -; CHECK-NEXT: zip1.4s v1, v0, v1 -; CHECK-NEXT: str q0, [x4] -; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: add.4s v0, v1, v0 -; CHECK-NEXT: str q0, [x2] -; CHECK-NEXT: ret +; CHECK-LE-SD-LABEL: zext_of_concat_extrause: +; CHECK-LE-SD: ; %bb.0: +; CHECK-LE-SD-NEXT: ldr d0, [x1] +; CHECK-LE-SD-NEXT: ldr d1, [x0] +; CHECK-LE-SD-NEXT: add.2s v0, v1, v0 +; CHECK-LE-SD-NEXT: movi.2d v1, #0000000000000000 +; CHECK-LE-SD-NEXT: mov.d v0[1], v0[0] +; CHECK-LE-SD-NEXT: zip1.4s v1, v0, v1 +; CHECK-LE-SD-NEXT: str q0, [x4] +; CHECK-LE-SD-NEXT: ldr q0, [x2] +; CHECK-LE-SD-NEXT: add.4s v0, v1, v0 +; CHECK-LE-SD-NEXT: str q0, [x2] +; CHECK-LE-SD-NEXT: ret ; ; CHECK-BE-LABEL: zext_of_concat_extrause: ; CHECK-BE: // %bb.0: @@ -68,6 +84,25 @@ define void @zext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nou ; CHECK-BE-NEXT: add v0.4s, v0.4s, v1.4s ; CHECK-BE-NEXT: st1 { v0.4s }, [x2] ; CHECK-BE-NEXT: ret +; +; CHECK-LE-GI-LABEL: zext_of_concat_extrause: +; CHECK-LE-GI: ; %bb.0: +; CHECK-LE-GI-NEXT: ldr d0, [x0] +; CHECK-LE-GI-NEXT: ldr d1, [x1] +; CHECK-LE-GI-NEXT: movi.2d v3, #0000000000000000 +; CHECK-LE-GI-NEXT: Lloh2: +; CHECK-LE-GI-NEXT: adrp x8, lCPI1_0@PAGE +; CHECK-LE-GI-NEXT: add.2s v2, v0, v1 +; CHECK-LE-GI-NEXT: Lloh3: +; CHECK-LE-GI-NEXT: ldr q0, [x8, lCPI1_0@PAGEOFF] +; CHECK-LE-GI-NEXT: mov.d v2[1], v2[0] +; CHECK-LE-GI-NEXT: tbl.16b v0, { v2, v3 }, v0 +; CHECK-LE-GI-NEXT: str q2, [x4] +; CHECK-LE-GI-NEXT: ldr q1, [x2] +; CHECK-LE-GI-NEXT: add.4s v0, v0, v1 +; CHECK-LE-GI-NEXT: str q0, [x2] +; CHECK-LE-GI-NEXT: ret +; CHECK-LE-GI-NEXT: .loh AdrpLdr Lloh2, Lloh3 %i0.a = load <2 x i32>, ptr %a %i0.b = load <2 x i32>, ptr %b %i0 = add <2 x i32> %i0.a, %i0.b @@ -81,16 +116,16 @@ define void @zext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nou } define void @aext_of_concat(ptr %a, ptr %b, ptr %c, ptr %d) nounwind { -; CHECK-LABEL: aext_of_concat: -; CHECK: ; %bb.0: -; CHECK-NEXT: ldr d0, [x0] -; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: add.2s v0, v0, v1 -; CHECK-NEXT: ldr q1, [x2] -; CHECK-NEXT: ushll.2d v0, v0, #0 -; CHECK-NEXT: add.4s v0, v0, v1 -; CHECK-NEXT: str q0, [x2] -; CHECK-NEXT: ret +; CHECK-LE-SD-LABEL: aext_of_concat: +; CHECK-LE-SD: ; %bb.0: +; CHECK-LE-SD-NEXT: ldr d0, [x0] +; CHECK-LE-SD-NEXT: ldr d1, [x1] +; CHECK-LE-SD-NEXT: add.2s v0, v0, v1 +; CHECK-LE-SD-NEXT: ldr q1, [x2] +; CHECK-LE-SD-NEXT: ushll.2d v0, v0, #0 +; CHECK-LE-SD-NEXT: add.4s v0, v0, v1 +; CHECK-LE-SD-NEXT: str q0, [x2] +; CHECK-LE-SD-NEXT: ret ; ; CHECK-BE-LABEL: aext_of_concat: ; CHECK-BE: // %bb.0: @@ -102,6 +137,17 @@ define void @aext_of_concat(ptr %a, ptr %b, ptr %c, ptr %d) nounwind { ; CHECK-BE-NEXT: add v0.4s, v0.4s, v1.4s ; CHECK-BE-NEXT: st1 { v0.4s }, [x2] ; CHECK-BE-NEXT: ret +; +; 
CHECK-LE-GI-LABEL: aext_of_concat: +; CHECK-LE-GI: ; %bb.0: +; CHECK-LE-GI-NEXT: ldr d0, [x0] +; CHECK-LE-GI-NEXT: ldr d1, [x1] +; CHECK-LE-GI-NEXT: add.2s v0, v0, v1 +; CHECK-LE-GI-NEXT: ldr q1, [x2] +; CHECK-LE-GI-NEXT: zip1.4s v0, v0, v0 +; CHECK-LE-GI-NEXT: add.4s v0, v0, v1 +; CHECK-LE-GI-NEXT: str q0, [x2] +; CHECK-LE-GI-NEXT: ret %i0.a = load <2 x i32>, ptr %a %i0.b = load <2 x i32>, ptr %b %i0 = add <2 x i32> %i0.a, %i0.b @@ -114,19 +160,19 @@ define void @aext_of_concat(ptr %a, ptr %b, ptr %c, ptr %d) nounwind { } define void @aext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nounwind { -; CHECK-LABEL: aext_of_concat_extrause: -; CHECK: ; %bb.0: -; CHECK-NEXT: ldr d0, [x1] -; CHECK-NEXT: ldr d1, [x0] -; CHECK-NEXT: add.2s v0, v1, v0 -; CHECK-NEXT: mov.16b v1, v0 -; CHECK-NEXT: mov.d v1[1], v0[0] -; CHECK-NEXT: zip1.4s v0, v0, v0 -; CHECK-NEXT: str q1, [x4] -; CHECK-NEXT: ldr q1, [x2] -; CHECK-NEXT: add.4s v0, v0, v1 -; CHECK-NEXT: str q0, [x2] -; CHECK-NEXT: ret +; CHECK-LE-SD-LABEL: aext_of_concat_extrause: +; CHECK-LE-SD: ; %bb.0: +; CHECK-LE-SD-NEXT: ldr d0, [x1] +; CHECK-LE-SD-NEXT: ldr d1, [x0] +; CHECK-LE-SD-NEXT: add.2s v0, v1, v0 +; CHECK-LE-SD-NEXT: mov.16b v1, v0 +; CHECK-LE-SD-NEXT: mov.d v1[1], v0[0] +; CHECK-LE-SD-NEXT: zip1.4s v0, v0, v0 +; CHECK-LE-SD-NEXT: str q1, [x4] +; CHECK-LE-SD-NEXT: ldr q1, [x2] +; CHECK-LE-SD-NEXT: add.4s v0, v0, v1 +; CHECK-LE-SD-NEXT: str q0, [x2] +; CHECK-LE-SD-NEXT: ret ; ; CHECK-BE-LABEL: aext_of_concat_extrause: ; CHECK-BE: // %bb.0: @@ -141,6 +187,19 @@ define void @aext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nou ; CHECK-BE-NEXT: add v0.4s, v0.4s, v1.4s ; CHECK-BE-NEXT: st1 { v0.4s }, [x2] ; CHECK-BE-NEXT: ret +; +; CHECK-LE-GI-LABEL: aext_of_concat_extrause: +; CHECK-LE-GI: ; %bb.0: +; CHECK-LE-GI-NEXT: ldr d0, [x0] +; CHECK-LE-GI-NEXT: ldr d1, [x1] +; CHECK-LE-GI-NEXT: add.2s v0, v0, v1 +; CHECK-LE-GI-NEXT: mov.d v0[1], v0[0] +; CHECK-LE-GI-NEXT: zip1.4s v1, v0, v0 +; CHECK-LE-GI-NEXT: str q0, [x4] +; CHECK-LE-GI-NEXT: ldr q0, [x2] +; CHECK-LE-GI-NEXT: add.4s v0, v1, v0 +; CHECK-LE-GI-NEXT: str q0, [x2] +; CHECK-LE-GI-NEXT: ret %i0.a = load <2 x i32>, ptr %a %i0.b = load <2 x i32>, ptr %b %i0 = add <2 x i32> %i0.a, %i0.b diff --git a/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll b/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll index d1e0729..6a91d85 100644 --- a/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll +++ b/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll @@ -11,10 +11,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @cvtn_f16_tuple(i64 %stride, p ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * 
VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1] @@ -52,10 +52,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @cvtnt_f32_tuple(i64 %stride, ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: mov z1.d, z0.d diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir index aed3145..e970d83 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir @@ -9,16 +9,16 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill - ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG + ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-1 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: // implicit-def: $z8 ; CHECK-NEXT: // implicit-def: $p4 ; CHECK-NEXT: addvl sp, sp, #1 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir index 17b1ad2..03a6aab 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir @@ -64,7 +64,7 @@ # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 -# CHECK-NEXT: frame-setup 
CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 32 @@ -79,7 +79,8 @@ # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 16 * VG # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 # ASM: .cfi_def_cfa_offset 0 @@ -88,8 +89,8 @@ # # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +32 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit16, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 @@ -129,7 +130,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 48 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # # CHECK-NEXT: $x20 = IMPLICIT_DEF @@ -152,7 +153,8 @@ body: | # ASM-NEXT: .cfi_offset w21, -16 # ASM-NEXT: .cfi_offset w29, -32 # ASM: .cfi_def_cfa_offset 48 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 16 * VG # # ASM: .cfi_def_cfa wsp, 48 # ASM: .cfi_def_cfa_offset 32 @@ -166,9 +168,8 @@ body: | # UNWINDINFO: DW_CFA_offset: reg20 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg21 -16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 -# UNWINDINFO: DW_CFA_def_cfa_offset: +48 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# +# UNWINDINFO: DW_CFA_def_cfa_offset: +48 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit16, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +48 # UNWINDINFO: DW_CFA_def_cfa_offset: +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 @@ -272,7 +273,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 16 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 2 @@ -295,7 +296,8 @@ body: | # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 24 * VG # # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 @@ -305,7 +307,7 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 
# UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 @@ -434,7 +436,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $[[TMP:x[0-9]+]] = ADDVL_XXI $sp, 1 # CHECK-NEXT: $x0 = LDRXui killed $[[TMP]], 4 @@ -451,7 +453,8 @@ body: | # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 8 * VG # # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 @@ -461,7 +464,7 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 # UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 @@ -504,23 +507,23 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: 
$sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP2:x[0-9]+]] = ADDVL_XXI $sp, 1 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP2]], 255 @@ -529,21 +532,21 @@ body: | # CHECK-NEXT: STR_PXI $p0, killed $[[TMP2]], 255 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 9 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 # CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 16 @@ -554,48 +557,65 @@ body: | # ASM-LABEL: test_address_sve_out_of_range: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 512 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 
16 + 1280 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2056 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 256 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 512 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 768 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1024 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1280 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1536 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1792 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 2048 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 2056 * VG # -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1808 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1560 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1312 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1064 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 816 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 568 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 320 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 72 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM: .cfi_def_cfa_offset 0 # ASM-NEXT: .cfi_restore w29 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +256, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +512, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +768, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1024, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1280, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, 
DW_OP_plus, DW_OP_consts +1536, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1792, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2048, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2056, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +256, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +512, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +768, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1024, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1280, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1536, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1792, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +2048, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +2056, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1808, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1560, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1312, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1064, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +816, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +568, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +320, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +72, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1808, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1560, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1312, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1064, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +816, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, 
DW_OP_consts +568, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +320, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +72, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 # UNWINDINFO-NEXT: DW_CFA_restore: reg29 @@ -702,15 +722,15 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: frame-setup STR_PXI killed $p6, $sp, 5 # CHECK: frame-setup STR_PXI killed $p5, $sp, 6 # CHECK: frame-setup STR_PXI killed $p4, $sp, 7 # CHECK: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $p6 = frame-destroy LDR_PXI $sp, 5 # CHECK: $p5 = frame-destroy LDR_PXI $sp, 6 # CHECK: $p4 = frame-destroy LDR_PXI $sp, 7 @@ -725,20 +745,23 @@ body: | # ASM-LABEL: save_restore_pregs_sve: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 8 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 8 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM: .cfi_def_cfa_offset 0 # ASM-NEXT: .cfi_restore w29 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 # UNWINDINFO-NEXT: DW_CFA_restore: reg29 @@ -761,18 +784,18 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 
0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0 # CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1 # CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 # CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 @@ -789,13 +812,19 @@ body: | # ASM-LABEL: save_restore_zregs_sve: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 16 +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 24 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM-NEXT: .cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -805,13 +834,13 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# 
UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -848,7 +877,7 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -32 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -18 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: frame-setup STR_PXI killed $p15, $sp, 4 # CHECK: frame-setup STR_PXI killed $p14, $sp, 5 # CHECK: frame-setup STR_PXI killed $p5, $sp, 14 @@ -857,23 +886,23 @@ body: | # CHECK: frame-setup STR_ZXI killed $z22, $sp, 3 # CHECK: frame-setup STR_ZXI killed $z9, $sp, 16 # CHECK: frame-setup STR_ZXI killed $z8, $sp, 17 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup 
CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $z23 = frame-destroy LDR_ZXI $sp, 2 # CHECK: $z22 = frame-destroy LDR_ZXI $sp, 3 # CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16 @@ -909,20 +938,33 @@ body: | # ASM-NEXT: .cfi_offset w20, -16 # ASM-NEXT: .cfi_offset w21, -24 # ASM-NEXT: .cfi_offset w29, -32 -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG -# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 144 * VG -# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d11 @ cfa - 32 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d12 @ cfa - 40 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d13 @ cfa - 48 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d14 @ cfa - 56 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d15 @ cfa - 64 * VG - 32 +# ASM: .cfi_escape +# ASM-SAME: // sp + 64 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 64 + 152 * VG # -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 152 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 144 * VG # ASM: .cfi_def_cfa wsp, 32 # ASM-NEXT: .cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -943,20 +985,20 @@ body: | # UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16 # UNWINDINFO-NEXT: DW_CFA_offset: reg21 -24 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 -# UNWINDINFO: 
DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -40, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_bregx 0x2e +0, DW_OP_consts -40, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_bregx 0x2e +0, DW_OP_consts -48, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_bregx 0x2e +0, DW_OP_consts -56, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_bregx 0x2e +0, DW_OP_consts -64, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +64, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +64, DW_OP_bregx 0x2e +0, DW_OP_consts +152, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +152, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 
+32 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -1025,14 +1067,14 @@ body: | # CHECK-NEXT: STR_ZXI killed $z22, $sp, 3 # CHECK: STR_ZXI killed $z9, $sp, 16 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 17 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: $[[TMP]] = frame-setup ADDVL_XXI $[[TMP]], -1 # CHECK-NEXT: $sp = frame-setup ANDXri killed $[[TMP]] @@ -1067,14 +1109,22 @@ body: | # ASM: .cfi_def_cfa w29, 16 # ASM-NEXT: .cfi_offset w30, -8 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d11 @ cfa - 32 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d12 @ cfa - 40 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d13 @ cfa - 48 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d14 @ cfa - 56 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d15 @ cfa - 64 * VG - 16 # # ASM: 
.cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -1093,14 +1143,14 @@ body: | # UNWINDINFO: DW_CFA_def_cfa: reg29 +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -40, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_bregx 0x2e +0, DW_OP_consts -40, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_bregx 0x2e +0, DW_OP_consts -48, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_bregx 0x2e +0, DW_OP_consts -56, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_bregx 0x2e +0, DW_OP_consts -64, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus # # UNWINDINFO: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -1188,17 +1238,17 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: STR_PXI killed $p15, $sp, 6 # CHECK-NEXT: STR_PXI killed $p4, $sp, 7 # CHECK-NEXT: STR_ZXI killed $z23, $sp, 1 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -7 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 7 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 
0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 # CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6 @@ -1214,11 +1264,15 @@ body: | # ASM-LABEL: frame_layout: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 80 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 80 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM-NEXT: .cfi_restore z8 # ASM: .cfi_def_cfa_offset 0 @@ -1226,11 +1280,11 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +80, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +80, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 diff --git a/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll b/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll index 374def5..0f208f8 100644 --- a/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll +++ b/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll @@ -1,6 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios -enable-subreg-liveness=false < %s | sed -e "/; kill: /d" | FileCheck %s -; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios -enable-subreg-liveness=true < %s | FileCheck %s +; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios < %s | FileCheck %s ; Check there's no assert in spilling from implicit-def operands on an ; IMPLICIT_DEF. 
@@ -93,6 +92,7 @@ define void @widget(i32 %arg, i32 %arg1, ptr %arg2, ptr %arg3, ptr %arg4, i32 %a ; CHECK-NEXT: ldr x8, [sp, #40] ; 8-byte Folded Reload ; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: mov x1, xzr +; CHECK-NEXT: ; kill: def $w8 killed $w8 killed $x8 def $x8 ; CHECK-NEXT: str x8, [sp] ; CHECK-NEXT: bl _fprintf ; CHECK-NEXT: brk #0x1 diff --git a/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll b/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll index 2cf8621..474a9d1 100644 --- a/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll +++ b/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll @@ -36,7 +36,7 @@ define <vscale x 16 x i1> @match_nxv16i8_v4i8(<vscale x 16 x i8> %op1, <4 x i8> ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 ; CHECK-NEXT: umov w8, v1.h[1] @@ -241,7 +241,7 @@ define <vscale x 16 x i1> @match_nxv16i8_v32i8(<vscale x 16 x i8> %op1, <32 x i8 ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z3.b, z1.b[1] @@ -463,7 +463,7 @@ define <vscale x 4 x i1> @match_nxv4xi32_v4i32(<vscale x 4 x i32> %op1, <4 x i32 ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z2.s, z1.s[1] diff --git a/llvm/test/CodeGen/AArch64/lifetime-poison.ll b/llvm/test/CodeGen/AArch64/lifetime-poison.ll new file mode 100644 index 0000000..e04530d --- /dev/null +++ b/llvm/test/CodeGen/AArch64/lifetime-poison.ll @@ -0,0 +1,14 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=aarch64 -global-isel=0 < %s | FileCheck %s +; RUN: llc -mtriple=aarch64 -global-isel=1 < %s | FileCheck %s + +; Check that lifetime.start/end with poison argument are ignored. 
+ +define void @test() { +; CHECK-LABEL: test: +; CHECK: // %bb.0: +; CHECK-NEXT: ret + call void @llvm.lifetime.start.p0(i64 4, ptr poison) + call void @llvm.lifetime.end.p0(i64 4, ptr poison) + ret void +} diff --git a/llvm/test/CodeGen/AArch64/luti-with-sme2.ll b/llvm/test/CodeGen/AArch64/luti-with-sme2.ll index 2d30167..59e1cba 100644 --- a/llvm/test/CodeGen/AArch64/luti-with-sme2.ll +++ b/llvm/test/CodeGen/AArch64/luti-with-sme2.ll @@ -9,10 +9,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_luti4_lane_i16_x2_tuple( ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -50,10 +50,10 @@ define { <vscale x 8 x half>, <vscale x 8 x half> } @test_luti4_lane_f16_x2_tupl ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -91,10 +91,10 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @test_luti4_lane_bf16_x2 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 
0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/midpoint-int.ll b/llvm/test/CodeGen/AArch64/midpoint-int.ll index 15c1dff..79bba53 100644 --- a/llvm/test/CodeGen/AArch64/midpoint-int.ll +++ b/llvm/test/CodeGen/AArch64/midpoint-int.ll @@ -255,12 +255,11 @@ define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_signed_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxth w9, w1 -; CHECK-NEXT: sxth w10, w0 +; CHECK-NEXT: sxth w9, w0 ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, sxth ; CHECK-NEXT: cneg w8, w8, le +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -278,12 +277,11 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind { define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_unsigned_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: and w9, w1, #0xffff -; CHECK-NEXT: and w10, w0, #0xffff +; CHECK-NEXT: and w9, w0, #0xffff ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, uxth ; CHECK-NEXT: cneg w8, w8, ls +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -303,14 +301,13 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind { define i16 @scalar_i16_signed_mem_reg(ptr %a1_addr, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_signed_mem_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxth w9, w1 -; CHECK-NEXT: ldrsh w10, [x0] +; CHECK-NEXT: ldrsh w9, [x0] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w10, w9, w1, sxth ; CHECK-NEXT: cneg w8, w8, le -; CHECK-NEXT: lsr w9, w9, #1 -; CHECK-NEXT: madd w0, w9, w8, w10 +; CHECK-NEXT: cneg w10, w10, mi +; CHECK-NEXT: lsr w10, w10, #1 +; CHECK-NEXT: madd w0, w10, w8, w9 ; CHECK-NEXT: ret %a1 = load i16, ptr %a1_addr %t3 = icmp sgt i16 %a1, %a2 ; signed @@ -382,12 +379,11 @@ define i16 @scalar_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_signed_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxtb w9, w1 -; CHECK-NEXT: sxtb w10, w0 +; CHECK-NEXT: sxtb w9, w0 ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, sxtb ; CHECK-NEXT: cneg w8, w8, le +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -405,12 +401,11 @@ define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind { define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_unsigned_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: and w9, w1, #0xff -; CHECK-NEXT: and w10, w0, #0xff +; CHECK-NEXT: and w9, w0, #0xff ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; 
CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, uxtb ; CHECK-NEXT: cneg w8, w8, ls +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -430,14 +425,13 @@ define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind { define i8 @scalar_i8_signed_mem_reg(ptr %a1_addr, i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_signed_mem_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxtb w9, w1 -; CHECK-NEXT: ldrsb w10, [x0] +; CHECK-NEXT: ldrsb w9, [x0] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w10, w9, w1, sxtb ; CHECK-NEXT: cneg w8, w8, le -; CHECK-NEXT: lsr w9, w9, #1 -; CHECK-NEXT: madd w0, w9, w8, w10 +; CHECK-NEXT: cneg w10, w10, mi +; CHECK-NEXT: lsr w10, w10, #1 +; CHECK-NEXT: madd w0, w10, w8, w9 ; CHECK-NEXT: ret %a1 = load i8, ptr %a1_addr %t3 = icmp sgt i8 %a1, %a2 ; signed diff --git a/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll b/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll index 7b55c69..1ceb25b 100644 --- a/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll +++ b/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll @@ -13,10 +13,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @tbl2_b_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: ld1b { z3.b, z11.b }, pn8/z, [x1] ; CHECK-NEXT: ld1b { z4.b, z12.b }, pn8/z, [x1, x0] @@ -53,10 +53,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @tbl2_h_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 
; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -94,10 +94,10 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32> } @tbl2_s_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x1] @@ -135,10 +135,10 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64> } @tbl2_d_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1d { z3.d, z11.d }, pn8/z, [x1] @@ -176,10 +176,10 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @tbl2_bf16_tuple(i64 %st ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, 
x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -217,10 +217,10 @@ define { <vscale x 4 x float>, <vscale x 4 x float> } @tbl2_f32_tuple(i64 %strid ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x1] @@ -258,10 +258,10 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @tbl2_f64_tuple(i64 %str ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1d { z3.d, z11.d }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll b/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll index 4206c0bc..2a77d4d 100644 --- a/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll +++ b/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll @@ -27,12 +27,11 @@ define i32 @caller() nounwind ssp { ; CHECK-NEXT: sub sp, sp, #208 ; CHECK-NEXT: mov w8, #10 ; =0xa ; CHECK-NEXT: mov w9, #9 ; =0x9 -; CHECK-NEXT: mov w0, #1 ; =0x1 +; CHECK-NEXT: mov w10, #8 ; =0x8 ; CHECK-NEXT: stp x9, x8, [sp, #24] -; CHECK-NEXT: mov w8, #8 ; =0x8 -; CHECK-NEXT: mov w9, #6 ; =0x6 -; CHECK-NEXT: str x8, [sp, #16] ; CHECK-NEXT: mov w8, #7 ; =0x7 +; CHECK-NEXT: mov w9, #6 ; =0x6 +; CHECK-NEXT: mov w0, #1 ; =0x1 ; CHECK-NEXT: mov w1, #2 ; =0x2 ; CHECK-NEXT: mov w2, #3 ; =0x3 ; CHECK-NEXT: mov w3, #4 ; =0x4 @@ -47,7 +46,8 @@ define i32 @caller() nounwind ssp { ; CHECK-NEXT: stp x22, x21, [sp, #160] ; 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #176] ; 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #192] ; 16-byte Folded Spill -; CHECK-NEXT: stp x9, x8, [sp] +; 
CHECK-NEXT: stp x8, x10, [sp, #8] +; CHECK-NEXT: str x9, [sp] ; CHECK-NEXT: bl _callee ; CHECK-NEXT: ldp x29, x30, [sp, #192] ; 16-byte Folded Reload ; CHECK-NEXT: ldp x20, x19, [sp, #176] ; 16-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir b/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir deleted file mode 100644 index aecb90a..0000000 --- a/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir +++ /dev/null @@ -1,23 +0,0 @@ -# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 -# RUN: llc -mtriple=aarch64 -start-before=register-coalescer -stop-after=virtregrewriter -enable-subreg-liveness=false -o - %s | FileCheck %s -# RUN: llc -mtriple=aarch64 -start-before=register-coalescer -stop-after=virtregrewriter -enable-subreg-liveness=true -o - %s | FileCheck %s ---- -name: test -tracksRegLiveness: true -body: | - bb.0: - liveins: $x1 - ; CHECK-LABEL: name: test - ; CHECK: liveins: $x1 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $x0 = COPY $x1 - ; CHECK-NEXT: renamable $w1 = ORRWrr $wzr, renamable $w0, implicit-def renamable $x1 - ; CHECK-NEXT: RET_ReallyLR implicit $x1, implicit $x0 - %190:gpr64 = COPY killed $x1 - %191:gpr32 = COPY %190.sub_32:gpr64 - %192:gpr32 = ORRWrr $wzr, killed %191:gpr32 - %193:gpr64all = SUBREG_TO_REG 0, killed %192:gpr32, %subreg.sub_32 - $x0 = COPY killed %190:gpr64 - $x1 = COPY killed %193:gpr64all - RET_ReallyLR implicit $x1, implicit $x0 -... diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir index eb6242c..08fc47d 100644 --- a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir +++ b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir @@ -7,18 +7,9 @@ # CHECK-DBG: ********** JOINING INTERVALS *********** # CHECK-DBG: ********** INTERVALS ********** # CHECK-DBG: %0 [16r,32r:0) 0@16r weight:0.000000e+00 -# CHECK-DBG: %3 [48r,112r:0) 0@48r L0000000000000080 [48r,112r:0) 0@48r L0000000000000040 [48r,112r:0) 0@48r weight:0.000000e+00 -# CHECK-DBG: %4 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000080 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000040 [80r,112e:1)[112e,112d:0) 0@112e 1@80r weight:0.000000e+00 +# CHECK-DBG: %3 [48r,112r:0) 0@48r L0000000000000040 [48r,112r:0) 0@48r weight:0.000000e+00 +# CHECK-DBG: %4 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000080 [112e,112d:0) 0@112e L0000000000000040 [80r,112e:1)[112e,112d:0) 0@112e 1@80r weight:0.000000e+00 # CHECK-DBG: %5 [32r,112r:1)[112r,112d:0) 0@112r 1@32r weight:0.000000e+00 -# CHECK-DBG: ********** MACHINEINSTRS ********** -# CHECK-DBG: 0B bb.0.entry: -# CHECK-DBG: 16B %0:gpr64sp = ADDXri %stack.0, 0, 0 -# CHECK-DBG: 32B %5:gpr64common = nuw ADDXri %0:gpr64sp, 64, 0 -# CHECK-DBG: 48B undef %3.sub_32:gpr64 = MOVi32imm 64, implicit-def %3:gpr64 -# CHECK-DBG: 80B undef %4.sub_32:gpr64 = MOVi32imm 64, implicit-def %4:gpr64 -# CHECK-DBG: 112B dead %5:gpr64common, dead early-clobber %4:gpr64 = MOPSMemorySetPseudo %5:gpr64common(tied-def 0), %4:gpr64(tied-def 1), %3:gpr64, implicit-def dead $nzcv -# CHECK-DBG: 128B RET_ReallyLR - --- name: test tracksRegLiveness: true @@ -52,44 +43,9 @@ body: | # CHECK-DBG: %1 [32r,48B:2)[48B,320r:0)[320r,368B:1) 0@48B-phi 1@320r 2@32r # CHECK-DBG-SAME: weight:0.000000e+00 # CHECK-DBG: %3 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 
3@304B-phi -# CHECK-DBG-SAME: L0000000000000080 [240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@x 3@304B-phi +# CHECK-DBG-SAME: L0000000000000080 [288r,304B:0)[304B,320r:3) 0@288r 1@x 2@x 3@304B-phi # CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi # CHECK-DBG-SAME: weight:0.000000e+00 -# CHECK-DBG: ********** MACHINEINSTRS ********** -# CHECK-DBG: 0B bb.0: -# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%) -# CHECK-DBG: 32B %1:gpr64 = IMPLICIT_DEF -# CHECK-DBG: 48B bb.1: -# CHECK-DBG: ; predecessors: %bb.0, %bb.7 -# CHECK-DBG: successors: %bb.2(0x80000000); %bb.2(100.00%) -# CHECK-DBG: 64B bb.2: -# CHECK-DBG: ; predecessors: %bb.1 -# CHECK-DBG: successors: %bb.3(0x80000000); %bb.3(100.00%) -# CHECK-DBG: 80B undef %3.sub_32:gpr64 = MOVi32imm 1 -# CHECK-DBG: 96B bb.3: -# CHECK-DBG: ; predecessors: %bb.2 -# CHECK-DBG: successors: %bb.7(0x40000000), %bb.4(0x40000000); %bb.7(50.00%), %bb.4(50.00%) -# CHECK-DBG: 112B $nzcv = IMPLICIT_DEF -# CHECK-DBG: 144B Bcc 1, %bb.7, implicit killed $nzcv -# CHECK-DBG: 160B bb.4: -# CHECK-DBG: ; predecessors: %bb.3 -# CHECK-DBG: successors: %bb.6(0x40000000), %bb.5(0x40000000); %bb.6(50.00%), %bb.5(50.00%) -# CHECK-DBG: 176B $nzcv = IMPLICIT_DEF -# CHECK-DBG: 192B Bcc 1, %bb.6, implicit killed $nzcv -# CHECK-DBG: 208B bb.5: -# CHECK-DBG: ; predecessors: %bb.4 -# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%) -# CHECK-DBG: 240B undef %3.sub_32:gpr64 = MOVi32imm 1, implicit-def %3:gpr64 -# CHECK-DBG: 256B B %bb.7 -# CHECK-DBG: 272B bb.6: -# CHECK-DBG: ; predecessors: %bb.4 -# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%) -# CHECK-DBG: 288B %3:gpr64 = COPY $xzr -# CHECK-DBG: 304B bb.7: -# CHECK-DBG: ; predecessors: %bb.3, %bb.5, %bb.6 -# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%) -# CHECK-DBG: 320B %1:gpr64 = ADDXrs %1:gpr64, %3:gpr64, 1 -# CHECK-DBG: 352B B %bb.1 --- name: reproducer tracksRegLiveness: true @@ -136,42 +92,6 @@ body: | # CHECK-DBG-SAME: L0000000000000080 [224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@x 3@288B-phi # CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@80r 3@288B-phi # CHECK-DBG-SAME: weight:0.000000e+00 -# CHECK-DBG: ********** MACHINEINSTRS ********** -# CHECK-DBG: 0B bb.0: -# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%) -# CHECK-DBG: 32B %1:gpr64 = IMPLICIT_DEF -# CHECK-DBG: 48B bb.1: -# CHECK-DBG: ; predecessors: %bb.0, %bb.7 -# CHECK-DBG: successors: %bb.2(0x80000000); %bb.2(100.00%) -# CHECK-DBG: 64B bb.2: -# CHECK-DBG: ; predecessors: %bb.1 -# CHECK-DBG: successors: %bb.3(0x80000000); %bb.3(100.00%) -# CHECK-DBG: 80B undef %3.sub_32:gpr64 = MOVi32imm 1 -# CHECK-DBG: 96B bb.3: -# CHECK-DBG: ; predecessors: %bb.2 -# CHECK-DBG: successors: %bb.7(0x40000000), %bb.4(0x40000000); %bb.7(50.00%), %bb.4(50.00%) -# CHECK-DBG: 112B $nzcv = IMPLICIT_DEF -# CHECK-DBG: 144B Bcc 1, %bb.7, implicit killed $nzcv -# CHECK-DBG: 160B bb.4: -# CHECK-DBG: ; predecessors: %bb.3 -# CHECK-DBG: successors: %bb.6(0x40000000), %bb.5(0x40000000); %bb.6(50.00%), %bb.5(50.00%) -# CHECK-DBG: 176B $nzcv = IMPLICIT_DEF -# CHECK-DBG: 192B Bcc 1, %bb.6, implicit killed $nzcv -# CHECK-DBG: 208B bb.5: -# CHECK-DBG: ; predecessors: %bb.4 -# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%) -# CHECK-DBG: 224B %3:gpr64 = IMPLICIT_DEF -# CHECK-DBG: 240B B %bb.7 -# CHECK-DBG: 256B bb.6: -# CHECK-DBG: ; predecessors: %bb.4 -# CHECK-DBG: successors: %bb.7(0x80000000); 
%bb.7(100.00%) -# CHECK-DBG: 272B %3:gpr64 = COPY $xzr -# CHECK-DBG: 288B bb.7: -# CHECK-DBG: ; predecessors: %bb.3, %bb.5, %bb.6 -# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%) -# CHECK-DBG: 304B %1:gpr64 = ADDXrs %1:gpr64, %3:gpr64, 1 -# CHECK-DBG: 336B B %bb.1 - --- name: reproducer2 tracksRegLiveness: true @@ -207,78 +127,3 @@ body: | B %bb.1 ... -# CHECK-DBG: ********** REGISTER COALESCER ********** -# CHECK-DBG: ********** Function: reproducer3 -# CHECK-DBG: ********** JOINING INTERVALS *********** -# CHECK-DBG: ********** INTERVALS ********** -# CHECK-DBG: W0 [0B,32r:0)[320r,336r:1) 0@0B-phi 1@320r -# CHECK-DBG: W1 [0B,16r:0) 0@0B-phi -# CHECK-DBG: %0 [16r,64r:0) 0@16r weight:0.000000e+00 -# CHECK-DBG: %1 [32r,128r:0) 0@32r weight:0.000000e+00 -# CHECK-DBG: %2 [48r,64r:0) 0@48r weight:0.000000e+00 -# CHECK-DBG: %3 [64r,80r:0) 0@64r weight:0.000000e+00 -# CHECK-DBG: %4 [80r,176r:0) 0@80r weight:0.000000e+00 -# CHECK-DBG: %7 [112r,128r:1)[128r,256r:0)[304B,320r:0) 0@128r 1@112r -# CHECK-DBG-SAME: L0000000000000080 [128r,256r:0)[304B,320r:0) 0@128r -# CHECK-DBG-SAME: L0000000000000040 [112r,128r:1)[128r,256r:0)[304B,320r:0) 0@128r 1@112r -# CHECK-DBG-SAME: weight:0.000000e+00 -# CHECK-DBG: %8 [96r,176r:1)[176r,192r:0) 0@176r 1@96r weight:0.000000e+00 -# CHECK-DBG: %9 [256r,272r:0) 0@256r weight:0.000000e+00 -# CHECK-DBG: ********** MACHINEINSTRS ********** -# CHECK-DBG: 0B bb.0: -# CHECK-DBG: successors: %bb.2(0x40000000), %bb.1(0x40000000); %bb.2(50.00%), %bb.1(50.00%) -# CHECK-DBG: liveins: $w0, $w1 -# CHECK-DBG: 16B %0:gpr32 = COPY $w1 -# CHECK-DBG: 32B %1:gpr32 = COPY $w0 -# CHECK-DBG: 48B %2:gpr32 = UBFMWri %1:gpr32, 31, 30 -# CHECK-DBG: 64B %3:gpr32 = SUBWrs %2:gpr32, %0:gpr32, 1 -# CHECK-DBG: 80B %4:gpr32 = UBFMWri %3:gpr32, 1, 31 -# CHECK-DBG: 96B %8:gpr32common = MOVi32imm 1 -# CHECK-DBG: 112B undef %7.sub_32:gpr64 = MOVi32imm 1 -# CHECK-DBG: 128B undef %7.sub_32:gpr64 = BFMWri %7.sub_32:gpr64(tied-def 0), %1:gpr32, 31, 30, implicit-def %7:gpr64 -# CHECK-DBG: 176B %8:gpr32common = BFMWri %8:gpr32common(tied-def 0), %4:gpr32, 30, 29 -# CHECK-DBG: 192B dead $wzr = SUBSWri %8:gpr32common, 0, 0, implicit-def $nzcv -# CHECK-DBG: 208B Bcc 2, %bb.2, implicit killed $nzcv -# CHECK-DBG: 224B B %bb.1 -# CHECK-DBG: 240B bb.1: -# CHECK-DBG: ; predecessors: %bb.0 -# CHECK-DBG: 256B %9:gpr64common = UBFMXri %7:gpr64, 62, 61 -# CHECK-DBG: 272B dead $xzr = LDRXui %9:gpr64common, 0 -# CHECK-DBG: 288B RET_ReallyLR -# CHECK-DBG: 304B bb.2: -# CHECK-DBG: ; predecessors: %bb.0 -# CHECK-DBG: 320B $x0 = COPY %7:gpr64 -# CHECK-DBG: 336B RET_ReallyLR implicit $x0 - ---- -name: reproducer3 -tracksRegLiveness: true -body: | - bb.0: - liveins: $w0, $w1 - - %0:gpr32 = COPY killed $w1 - %1:gpr32 = COPY killed $w0 - %3:gpr32 = UBFMWri %1, 31, 30 - %4:gpr32 = SUBWrs killed %3, killed %0, 1 - %5:gpr32 = UBFMWri killed %4, 1, 31 - %6:gpr32 = MOVi32imm 1 - %7:gpr32 = COPY %6 - %7:gpr32 = BFMWri %7, killed %1, 31, 30 - %8:gpr64 = SUBREG_TO_REG 0, killed %7, %subreg.sub_32 - %9:gpr32common = COPY killed %6 - %9:gpr32common = BFMWri %9, killed %5, 30, 29 - dead $wzr = SUBSWri killed %9, 0, 0, implicit-def $nzcv - Bcc 2, %bb.2, implicit killed $nzcv - B %bb.1 - - bb.1: - %10:gpr64common = UBFMXri killed %8, 62, 61 - dead $xzr = LDRXui killed %10, 0 - RET_ReallyLR - - bb.2: - $x0 = COPY killed %8 - RET_ReallyLR implicit killed $x0 - -... 
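Most of the .cfi_escape churn in this patch (continuing below in sme-vg-to-stack.ll, the sme2 intrinsics tests, and stack-hazard.ll) is the same DWARF CFA expression re-encoded as a shorter byte string; the trailing "//" comments (e.g. "sp + 16 + 8 * VG", "$d8 @ cfa - 8 * VG - 16") already give the decoded meaning. For sanity-checking one of the new byte strings by hand, here is a minimal decoder sketch. This is not LLVM code: the opcode values are the standard DWARF ones, and the register numbers (31 = sp, 46 = VG, 72 onward = d8 onward) follow the comments already present in these tests.

# Sketch: decode the expression payload of a .cfi_escape from these tests.
# For DW_CFA_def_cfa_expression (0x0f) the payload starts after a ULEB128
# length byte; for DW_CFA_expression (0x10) it starts after a ULEB128
# register number and a length byte.

def uleb128(b, i):
    # Unsigned LEB128 starting at index i; returns (value, next index).
    result, shift = 0, 0
    while True:
        byte = b[i]; i += 1
        result |= (byte & 0x7f) << shift
        shift += 7
        if not byte & 0x80:
            return result, i

def sleb128(b, i):
    # Signed LEB128 starting at index i; returns (value, next index).
    result, shift = 0, 0
    while True:
        byte = b[i]; i += 1
        result |= (byte & 0x7f) << shift
        shift += 7
        if not byte & 0x80:
            if byte & 0x40:
                result -= 1 << shift
            return result, i

def decode_expr(b):
    # Handles only the operators that occur in these tests.
    i, out = 0, []
    while i < len(b):
        op = b[i]; i += 1
        if 0x70 <= op <= 0x8f:            # DW_OP_breg0 .. DW_OP_breg31
            off, i = sleb128(b, i)
            out.append(f"breg{op - 0x70} {off:+d}")
        elif op == 0x92:                  # DW_OP_bregx: ULEB reg, SLEB offset
            reg, i = uleb128(b, i)
            off, i = sleb128(b, i)
            out.append(f"bregx r{reg} {off:+d}")
        elif op == 0x11:                  # DW_OP_consts: SLEB constant
            val, i = sleb128(b, i)
            out.append(f"consts {val:+d}")
        elif 0x30 <= op <= 0x4f:          # DW_OP_lit0 .. DW_OP_lit31
            out.append(f"lit{op - 0x30}")
        elif op == 0x1e:
            out.append("mul")
        elif op == 0x22:
            out.append("plus")
        elif op == 0x1c:
            out.append("minus")
        else:
            out.append(f"op 0x{op:02x}")
    return ", ".join(out)

# Compact form checked below, e.g.
#   .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
# 0x0f = DW_CFA_def_cfa_expression, 0x08 = expression length; the rest is:
print(decode_expr(bytes([0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22])))
# -> breg31 +16, bregx r46 +0, lit8, mul, plus   i.e. sp + 16 + 8 * VG

The same routine covers the lit24 variant (0x48) in the "sp + 16 + 24 * VG" escapes. For the DW_CFA_expression records (leading 0x10, then the ULEB128 register such as 0x48 for d8), the expression is evaluated with the CFA as the initial stack value, which is where the "@ cfa" in the comments comes from.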
diff --git a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll index 0853325..6fcfc5b 100644 --- a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll +++ b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll @@ -328,7 +328,7 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; CHECK-NEXT: .cfi_offset w30, -24 ; CHECK-NEXT: .cfi_offset w29, -32 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 32 + 144 * VG ; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill @@ -351,16 +351,16 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d8 @ cfa - 8 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d9 @ cfa - 16 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d10 @ cfa - 24 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d11 @ cfa - 32 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d12 @ cfa - 40 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d13 @ cfa - 48 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d14 @ cfa - 56 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d15 @ cfa - 64 * VG - 32 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x01, 0x1e, 0x22 // sp + 32 + 152 * VG ; CHECK-NEXT: str z0, [sp] // 
16-byte Folded Spill ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -371,7 +371,7 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; CHECK-NEXT: smstart sm ; CHECK-NEXT: .cfi_restore vg ; CHECK-NEXT: addvl sp, sp, #1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 32 + 144 * VG ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload @@ -448,14 +448,14 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; FP-CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; FP-CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; FP-CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; FP-CHECK-NEXT: addvl sp, sp, #-1 ; FP-CHECK-NEXT: str z0, [x29, #-19, mul vl] // 16-byte Folded Spill ; FP-CHECK-NEXT: //APP diff --git a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll index b0390ec..8398e07 100644 --- a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll +++ b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll @@ -36,7 +36,7 @@ define { <vscale x 16 x i8>, 
<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 @@ -129,10 +129,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @bfcvt_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll index b4a83c1..58d2e25 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll @@ -58,7 +58,7 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll index 0bc9e15..3bb516d 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll @@ -24,10 +24,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vector_sat_shift_narrow ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 
0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z2.s, z10.s }, pn8/z, [x1] @@ -98,7 +98,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 diff --git a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir index 1d04cc6..c3338b1 100644 --- a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir +++ b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir @@ -17,7 +17,7 @@ body: | ; CHECK-NEXT: stp d9, d8, [sp, #16] ; CHECK-NEXT: str x29, [sp, #32] ; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 48 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: .cfi_offset b8, -24 ; CHECK-NEXT: .cfi_offset b9, -32 @@ -97,7 +97,7 @@ body: | ; CHECK: str x29, [sp, #-16]! 
; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: ptrue pn8.b diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll index 555e38a..109059e 100644 --- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll +++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll @@ -16,7 +16,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> % ; CHECK-LEGALIZATION-NEXT: .cfi_def_cfa_offset 16 ; CHECK-LEGALIZATION-NEXT: .cfi_offset w29, -16 ; CHECK-LEGALIZATION-NEXT: addvl sp, sp, #-3 -; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-LEGALIZATION-NEXT: cntd x8 ; CHECK-LEGALIZATION-NEXT: ptrue p0.d, vl2 ; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2 @@ -59,7 +59,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> % ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: mov w9, #2 // =0x2 @@ -111,7 +111,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x ; CHECK-LEGALIZATION-NEXT: .cfi_def_cfa_offset 16 ; CHECK-LEGALIZATION-NEXT: .cfi_offset w29, -16 ; CHECK-LEGALIZATION-NEXT: addvl sp, sp, #-3 -; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-LEGALIZATION-NEXT: cntd x8 ; CHECK-LEGALIZATION-NEXT: ptrue p0.d, vl2 ; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2 @@ -154,7 +154,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: mov w9, #2 // =0x2 diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll index 3a33405..4615b1a 100644 --- a/llvm/test/CodeGen/AArch64/stack-hazard.ll +++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll @@ -388,7 +388,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK0-NEXT: str d8, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK0-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -8 ; CHECK0-NEXT: .cfi_offset b8, -16 ; CHECK0-NEXT: mov z0.s, #0 // =0x0 @@ -407,7 +407,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK64-NEXT: str x29, [sp, #72] // 8-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: addvl sp, sp, #-1 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 144 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -8 ; CHECK64-NEXT: .cfi_offset b8, -80 ; CHECK64-NEXT: mov z0.s, #0 // =0x0 @@ -429,7 +429,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK1024-NEXT: str x29, [sp, #1032] // 8-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: addvl sp, sp, #-1 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2064 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -8 ; CHECK1024-NEXT: .cfi_offset b8, -1040 ; CHECK1024-NEXT: mov z0.s, #0 // =0x0 @@ -955,9 +955,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK0-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: mov w0, wzr @@ -973,9 +973,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 144 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -993,9 +993,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; 
CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2064 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1017,10 +1017,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK0-NEXT: addvl sp, sp, #-2 ; CHECK0-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: mov w0, wzr @@ -1038,10 +1038,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK64-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 16 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 144 + 16 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 80 - 16 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -1061,10 +1061,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK1024-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG ; CHECK1024-NEXT: 
.cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1040 - 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1086,9 +1086,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK0-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: addvl x8, sp, #1 @@ -1106,9 +1106,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #80 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 160 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -1127,9 +1127,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1040 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2080 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1153,9 +1153,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK0-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: mov x8, x0 @@ -1174,9 +1174,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #80 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 160 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP @@ -1196,9 +1196,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1040 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2080 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP @@ -1224,9 +1224,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: mov z0.s, #0 // =0x0 ; CHECK0-NEXT: mov w0, wzr ; CHECK0-NEXT: //APP @@ -1246,9 +1246,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: addvl sp, sp, #-1 -; CHECK64-NEXT: 
.cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 16 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 144 + 16 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov z0.s, #0 // =0x0 ; CHECK64-NEXT: add x8, sp, #64 ; CHECK64-NEXT: mov w0, wzr @@ -1271,9 +1271,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: addvl sp, sp, #-1 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov z0.s, #0 // =0x0 ; CHECK1024-NEXT: add x8, sp, #1024 ; CHECK1024-NEXT: mov w0, wzr @@ -1311,7 +1311,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK0-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: sub sp, sp, #16 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 80 + 64 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 80 + 64 * VG ; CHECK0-NEXT: .cfi_offset w19, -8 ; CHECK0-NEXT: .cfi_offset w20, -16 ; CHECK0-NEXT: .cfi_offset w21, -24 @@ -1320,14 +1320,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK0-NEXT: .cfi_offset w24, -48 ; CHECK0-NEXT: .cfi_offset w25, -56 ; CHECK0-NEXT: .cfi_offset w29, -64 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 
0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: mov x8, x0 ; CHECK0-NEXT: mov w0, wzr ; CHECK0-NEXT: //APP @@ -1368,7 +1368,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK64-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #96 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x01, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 224 + 64 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 224 + 64 * VG ; CHECK64-NEXT: .cfi_offset w19, -8 ; CHECK64-NEXT: .cfi_offset w20, -16 ; CHECK64-NEXT: .cfi_offset w21, -24 @@ -1377,14 +1377,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK64-NEXT: .cfi_offset w24, -48 ; CHECK64-NEXT: .cfi_offset w25, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 
0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP @@ -1431,7 +1431,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK1024-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1056 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2144 + 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 2144 + 64 * VG ; CHECK1024-NEXT: .cfi_offset w19, -8 ; CHECK1024-NEXT: .cfi_offset w20, -16 ; CHECK1024-NEXT: .cfi_offset w21, -24 @@ -1440,14 +1440,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK1024-NEXT: .cfi_offset w24, -48 ; CHECK1024-NEXT: .cfi_offset w25, -56 ; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 
0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP @@ -1869,7 +1869,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK0-NEXT: .cfi_offset w30, -40 ; CHECK0-NEXT: .cfi_offset w29, -48 ; CHECK0-NEXT: addvl sp, sp, #-18 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -1898,14 +1898,14 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ 
cfa - 64 * VG - 48 ; CHECK0-NEXT: mov x8, x0 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP @@ -1990,7 +1990,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: .cfi_offset w30, -40 ; CHECK64-NEXT: .cfi_offset w29, -48 ; CHECK64-NEXT: addvl sp, sp, #-18 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2019,16 +2019,16 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 112 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 112 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 112 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 112 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 112 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 112 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 112 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 112 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 112 ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x01, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 176 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 
0x0b, 0x8f, 0xb0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 176 + 144 * VG ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -2051,7 +2051,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: .cfi_restore vg ; CHECK64-NEXT: add sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2119,7 +2119,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: .cfi_offset w30, -40 ; CHECK1024-NEXT: .cfi_offset w29, -48 ; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2148,16 +2148,16 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1072 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1072 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1072 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1072 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1072 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1072 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1072 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1072 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xd0, 
0x77, 0x22 // $d11 @ cfa - 32 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1072 ; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2096 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 2096 + 144 * VG ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -2180,7 +2180,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: movk w0, #59491, lsl #16 ; CHECK1024-NEXT: .cfi_restore vg ; CHECK1024-NEXT: add sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2252,7 +2252,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: .cfi_offset w30, -40 ; CHECK0-NEXT: .cfi_offset w29, -48 ; CHECK0-NEXT: addvl sp, sp, #-18 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2281,16 +2281,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 
0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; CHECK0-NEXT: sub sp, sp, #48 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 96 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 96 + 144 * VG ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: bl __arm_sme_state @@ -2312,7 +2312,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: movk w0, #59491, lsl #16 ; CHECK0-NEXT: .cfi_restore vg ; CHECK0-NEXT: add sp, sp, #48 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK0-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK0-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2376,7 +2376,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: .cfi_offset w30, -40 ; CHECK64-NEXT: .cfi_offset w29, -48 ; CHECK64-NEXT: addvl sp, sp, #-18 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2405,16 +2405,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 112 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 
0x22 // $d9 @ cfa - 112 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 112 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 112 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 112 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 112 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 112 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 112 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 112 ; CHECK64-NEXT: sub sp, sp, #112 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x01, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 224 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 224 + 144 * VG ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP ; CHECK64-NEXT: bl __arm_sme_state @@ -2436,7 +2436,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: .cfi_restore vg ; CHECK64-NEXT: add sp, sp, #112 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2504,7 +2504,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: .cfi_offset w30, -40 ; CHECK1024-NEXT: .cfi_offset w29, -48 ; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 
0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2533,16 +2533,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1072 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1072 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1072 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1072 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1072 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1072 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1072 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1072 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1072 ; CHECK1024-NEXT: sub sp, sp, #1072 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2144 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 2144 + 144 * VG ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP ; CHECK1024-NEXT: bl __arm_sme_state @@ -2564,7 +2564,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: movk w0, #59491, lsl #16 ; CHECK1024-NEXT: .cfi_restore vg ; CHECK1024-NEXT: add sp, sp, #1072 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 
0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -3192,14 +3192,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: mov w9, w0 ; CHECK0-NEXT: mov x8, sp ; CHECK0-NEXT: mov w2, w1 @@ -3327,14 +3327,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 
0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: mov w9, w0 ; CHECK64-NEXT: mov x8, sp @@ -3469,14 +3469,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 
0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: mov w9, w0 ; CHECK1024-NEXT: mov x8, sp @@ -3616,14 +3616,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 
0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: sub x9, sp, #1024 ; CHECK0-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK0-NEXT: mov w2, w1 @@ -3743,14 +3743,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: sub x9, sp, #1088 ; CHECK64-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK64-NEXT: mov w2, w1 @@ -3875,14 +3875,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 
0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: sub x9, sp, #2048 ; CHECK1024-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK1024-NEXT: mov w2, w1 @@ -4016,14 +4016,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK0-NEXT: .cfi_offset w28, -48 ; CHECK0-NEXT: .cfi_offset w30, -56 ; CHECK0-NEXT: .cfi_offset w29, -64 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 
0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK0-NEXT: ubfiz x8, x0, #2, #32 ; CHECK0-NEXT: mov x9, sp @@ -4125,14 +4125,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK64-NEXT: .cfi_offset w28, -48 ; CHECK64-NEXT: .cfi_offset w30, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK64-NEXT: ubfiz x8, x0, #2, #32 ; 
CHECK64-NEXT: mov x9, sp @@ -4240,14 +4240,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK1024-NEXT: .cfi_offset w28, -48 ; CHECK1024-NEXT: .cfi_offset w30, -56 ; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK1024-NEXT: ubfiz x8, x0, #2, #32 ; CHECK1024-NEXT: mov x9, sp diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll index 56d865e..59b95be 100644 --- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll +++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll @@ -18,7 +18,7 @@ define void @sve_1_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -38,7 +38,7 @@ define void @sve_4_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; 
CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -63,7 +63,7 @@ define void @sve_16_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-16 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 128 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x01, 0x1e, 0x22 // sp + 16 + 128 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 @@ -103,7 +103,7 @@ define void @sve_17_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-17 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 136 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x01, 0x1e, 0x22 // $x9 + 16 + 136 * VG ; CHECK-NEXT: .LBB3_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -155,9 +155,9 @@ define void @sve_1v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload @@ -180,15 +180,15 @@ define void @sve_4v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: str z11, [sp] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG +; CHECK-NEXT: .cfi_escape 0x10, 
0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z11, [sp] // 16-byte Folded Reload @@ -217,7 +217,7 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-16 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 128 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x01, 0x1e, 0x22 // sp + 16 + 128 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill ; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill @@ -235,14 +235,14 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr 
z23, [sp] // 16-byte Folded Reload @@ -287,7 +287,7 @@ define void @sve_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -310,7 +310,7 @@ define void @sve_4p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p11, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p10, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p9, [sp, #6, mul vl] // 2-byte Folded Spill @@ -339,7 +339,7 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-17 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 136 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x01, 0x1e, 0x22 // $x9 + 16 + 136 * VG ; CHECK-NEXT: .LBB9_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -370,14 +370,14 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa 
- 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload @@ -426,7 +426,7 @@ define void @sve_1_vector_16_arr(ptr %out) #0 { ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .cfi_def_cfa wsp, 32 ; CHECK-NEXT: add sp, sp, #16 @@ -453,9 +453,9 @@ define void @sve_1_vector_4096_arr(ptr %out) #0 { ; CHECK-NEXT: sub x9, sp, #3, lsl #12 // =12288 ; CHECK-NEXT: .cfi_def_cfa w9, 12304 ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x79, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 12304 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x79, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // $x9 + 12304 + 256 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x79, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 12304 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x79, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // $x9 + 12304 + 512 * VG ; CHECK-NEXT: .LBB11_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -470,9 +470,9 @@ define void @sve_1_vector_4096_arr(ptr %out) #0 { ; CHECK-NEXT: ldr xzr, [sp] ; CHECK-NEXT: .cfi_def_cfa_register wsp ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x8f, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x88, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 12304 + 264 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x02, 0x1e, 0x22 // sp + 12304 + 264 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 12304 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 12304 + 16 * VG ; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: .cfi_def_cfa wsp, 12304 ; CHECK-NEXT: add sp, sp, #3, lsl #12 // =12288 @@ -538,38 +538,38 @@ define void @sve_1024_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // sp + 16 + 256 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 
16 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // sp + 16 + 512 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x06, 0x1e, 0x22 // sp + 16 + 768 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x08, 0x1e, 0x22 // sp + 16 + 1024 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0a, 0x1e, 0x22 // sp + 16 + 1280 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0c, 0x1e, 0x22 // sp + 16 + 1536 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0e, 0x1e, 0x22 // sp + 16 + 1792 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x10, 0x1e, 0x22 // sp + 16 + 2048 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1800 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x0e, 0x1e, 0x22 // sp + 16 + 1800 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1552 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x0c, 0x1e, 0x22 // sp + 16 + 1552 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1304 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x0a, 0x1e, 0x22 // sp + 16 + 1304 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1056 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x08, 0x1e, 0x22 // sp + 16 + 1056 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 808 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x06, 0x1e, 0x22 // sp + 16 + 808 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 560 * VG +; 
CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x04, 0x1e, 0x22 // sp + 16 + 560 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 312 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x02, 0x1e, 0x22 // sp + 16 + 312 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: addvl sp, sp, #8 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -588,23 +588,23 @@ define void @sve_1028_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // $x9 + 16 + 256 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // $x9 + 16 + 512 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 768 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x06, 0x1e, 0x22 // $x9 + 16 + 768 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1024 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x08, 0x1e, 0x22 // $x9 + 16 + 1024 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1280 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0a, 0x1e, 0x22 // $x9 + 16 + 1280 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1536 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0c, 0x1e, 0x22 // $x9 + 16 + 1536 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1792 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0e, 0x1e, 0x22 // $x9 + 16 + 1792 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 2048 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x10, 0x1e, 0x22 // $x9 + 16 + 2048 * VG ; CHECK-NEXT: addvl x9, x9, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 2056 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 
0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x10, 0x1e, 0x22 // $x9 + 16 + 2056 * VG ; CHECK-NEXT: .LBB14_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #16, lsl #12 // =65536 @@ -619,21 +619,21 @@ define void @sve_1028_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: ldr xzr, [sp] ; CHECK-NEXT: .cfi_def_cfa_register wsp ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x0e, 0x1e, 0x22 // sp + 16 + 1808 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x0c, 0x1e, 0x22 // sp + 16 + 1560 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x0a, 0x1e, 0x22 // sp + 16 + 1312 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x08, 0x1e, 0x22 // sp + 16 + 1064 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x06, 0x1e, 0x22 // sp + 16 + 816 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x04, 0x1e, 0x22 // sp + 16 + 568 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x02, 0x1e, 0x22 // sp + 16 + 320 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: addvl sp, sp, #9 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -656,7 +656,7 @@ define void @sve_5_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-5 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 40 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x28, 0x1e, 0x22 // sp + 16 + 40 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #5 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 @@ -682,21 +682,21 @@ define void @sve_unprobed_area(<vscale x 4 x float> %a, i32 %n) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 
0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: str p9, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: addvl sp, sp, #4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll b/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll new file mode 100644 index 0000000..bd41101 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll @@ -0,0 +1,18 @@ +; RUN: llc -mtriple=arm64ec-unknown-windows < %s | FileCheck -check-prefixes=CHECK,NONGNU %s +; RUN: llc -mtriple=arm64ec-unknown-windows-gnu < %s | FileCheck -check-prefixes=CHECK,GNU %s + +; CHECK-LABEL: func = "#func" +; CHECK: bl "#other" +; NONGNU: bl "#__security_check_cookie_arm64ec" +; GNU: bl "#__stack_chk_fail" +define void @func() #0 { +entry: + %buf = alloca [10 x i8], align 1 + call void @other(ptr %buf) #1 + ret void +} + +declare void @other(ptr) #1 + +attributes #0 = { nounwind sspstrong } +attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll index 2520095..8b7fa9e 100644 --- a/llvm/test/CodeGen/AArch64/sve-alloca.ll +++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll @@ -46,14 +46,14 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) { ; CHECK-NEXT: .cfi_offset w28, -16 ; CHECK-NEXT: .cfi_offset w30, -24 ; CHECK-NEXT: .cfi_offset w29, -32 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 
0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d8 @ cfa - 8 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d9 @ cfa - 16 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d10 @ cfa - 24 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d11 @ cfa - 32 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d12 @ cfa - 40 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d13 @ cfa - 48 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d14 @ cfa - 56 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d15 @ cfa - 64 * VG - 32 ; CHECK-NEXT: rdvl x9, #2 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: add x9, x9, #15 diff --git a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll index 30a8396..254b8e0 100644 --- a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll +++ b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll @@ -43,17 +43,17 @@ define void @fbyte(<vscale x 16 x i8> %v){ ; NOPAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; NOPAIR-NEXT: .cfi_offset w30, -8 ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 
0x22 // $d13 @ cfa - 16 - 48 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; NOPAIR-NEXT: bl my_func ; NOPAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; NOPAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload @@ -113,17 +113,17 @@ define void @fbyte(<vscale x 16 x i8> %v){ ; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; PAIR-NEXT: .cfi_offset w30, -8 ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 
0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; PAIR-NEXT: bl my_func ; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload @@ -187,17 +187,17 @@ define void @fhalf(<vscale x 8 x half> %v) { ; NOPAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; NOPAIR-NEXT: .cfi_offset w30, -8 ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; NOPAIR-NEXT: bl my_func ; NOPAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; NOPAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload @@ -257,17 +257,17 @@ define void 
@fhalf(<vscale x 8 x half> %v) { ; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; PAIR-NEXT: .cfi_offset w30, -8 ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; PAIR-NEXT: bl my_func ; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload @@ -310,11 +310,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() { ; NOPAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; 
NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload @@ -336,11 +336,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() { ; PAIR-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ptrue pn8.b @@ -368,11 +368,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() { ; NOPAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload @@ -393,11 +393,11 @@ define aarch64_sve_vector_pcs void 
@test_clobbers_z_p_regs2() { ; PAIR-NEXT: str p10, [sp, #6, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: st1b { z8.b, z9.b }, pn9, [sp, #2, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ptrue pn9.b @@ -421,10 +421,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() { ; NOPAIR-NEXT: addvl sp, sp, #-2 ; NOPAIR-NEXT: str z9, [sp] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z9, [sp] // 16-byte Folded Reload @@ -440,10 +440,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() { ; PAIR-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: 
//NO_APP ; PAIR-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload @@ -494,10 +494,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_2_z_regs_negative() { ; NOPAIR-NEXT: addvl sp, sp, #-2 ; NOPAIR-NEXT: str z10, [sp] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 16 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp] // 16-byte Folded Reload @@ -512,10 +512,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_2_z_regs_negative() { ; PAIR-NEXT: addvl sp, sp, #-2 ; PAIR-NEXT: str z10, [sp] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 16 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 16 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ldr z10, [sp] // 16-byte Folded Reload @@ -536,7 +536,7 @@ define aarch64_sve_vector_pcs void @test_clobbers_p_reg_negative() { ; NOPAIR-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; NOPAIR-NEXT: addvl sp, sp, #-1 ; NOPAIR-NEXT: str p10, [sp, #7, mul vl] // 2-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP @@ -550,7 +550,7 @@ define aarch64_sve_vector_pcs void @test_clobbers_p_reg_negative() { ; PAIR-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; PAIR-NEXT: addvl sp, sp, #-1 ; PAIR-NEXT: str p10, [sp, #7, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; PAIR-NEXT: .cfi_offset w29, -16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll index 5e4c891..9066051 100644 --- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll +++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll @@ -438,7 +438,7 @@ define void @non_sve_caller_non_sve_callee_high_range() { ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 @@ -464,7 +464,7 @@ define void @non_sve_caller_high_range_non_sve_callee_high_range(float %f0, floa ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 @@ -523,17 +523,17 @@ define <vscale x 4 x float> @sve_caller_non_sve_callee_high_range(<vscale x 4 x ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 168 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x01, 0x1e, 0x22 // sp + 16 + 168 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 
0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: mov z25.d, z0.d ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: movi d0, #0000000000000000 @@ -621,17 +621,17 @@ define <vscale x 4 x float> @sve_ret_caller_non_sve_callee_high_range() { ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c 
// $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: movi d0, #0000000000000000 ; CHECK-NEXT: fmov s1, #1.00000000 ; CHECK-NEXT: addvl x0, sp, #1 @@ -686,7 +686,7 @@ define void @verify_all_operands_are_initialised() { ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll index d02aa06..6c6a691 100644 --- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll @@ -8,7 +8,7 @@ define <4 x i32> @extract_v4i32_nxv16i32_12(<vscale x 16 x i32> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -27,7 +27,7 @@ define <8 x i16> @extract_v8i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z1, [sp, #1, mul vl] ; CHECK-NEXT: str z0, [sp] @@ -44,7 +44,7 @@ define <4 x i16> @extract_v4i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -65,7 +65,7 @@ define <2 x i16> @extract_v2i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: str z3, [sp, #3, mul vl] @@ -94,7 +94,7 @@ define <2 x i64> @extract_v2i64_nxv8i64_8(<vscale x 8 x i64> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, #8 // =0x8 @@ -120,7 +120,7 @@ define <4 x float> @extract_v4f32_nxv16f32_12(<vscale x 16 x float> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -168,7 +168,7 @@ define <4 x i1> @extract_v4i1_nxv32i1_16(<vscale x 32 x i1> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1 ; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1 @@ -224,7 +224,7 @@ define <4 x i3> @extract_v4i3_nxv32i3_16(<vscale x 32 x i3> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: str z1, [sp, #1, mul vl] @@ -271,7 +271,7 @@ define <4 x i64> @extract_v4i64_nxv8i64_0(<vscale x 8 x i64> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z1, [sp, #1, mul vl] ; CHECK-NEXT: str z0, [sp] diff --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll index cbede1b..4aaa25e 100644 --- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll @@ -63,7 +63,7 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) uw ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: punpkhi p2.h, p1.b ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: punpklo p1.h, p1.b diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll index 9efe0b3..122dc57 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll @@ -36,9 +36,8 @@ define void @select_v16f16(ptr %a, ptr %b) vscale_range(2,0) #0 { ; CHECK-NEXT: ptrue p0.h, vl16 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h -; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h -; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: fcmne p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: st1h { z1.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <16 x half>, ptr %a %op2 = load <16 x half>, ptr %b @@ -57,12 +56,10 @@ define void @select_v32f16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x1, x8, lsl #1] ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] -; VBITS_GE_256-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h -; VBITS_GE_256-NEXT: fcmeq p2.h, p0/z, z2.h, z3.h -; VBITS_GE_256-NEXT: sel z0.h, p1, z0.h, z1.h -; VBITS_GE_256-NEXT: sel z1.h, p2, z2.h, z3.h -; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: fcmne p1.h, p0/z, z0.h, z1.h +; VBITS_GE_256-NEXT: fcmne p0.h, p0/z, z2.h, z3.h +; VBITS_GE_256-NEXT: st1h { z1.h }, p1, [x0, x8, lsl #1] +; VBITS_GE_256-NEXT: st1h { z3.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: select_v32f16: @@ -70,9 +67,8 @@ define void @select_v32f16(ptr %a, ptr %b) #0 { ; VBITS_GE_512-NEXT: ptrue p0.h, vl32 ; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1h { z1.h }, p0/z, [x1] -; VBITS_GE_512-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h -; VBITS_GE_512-NEXT: sel z0.h, p1, z0.h, z1.h -; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0] +; VBITS_GE_512-NEXT: fcmne p0.h, p0/z, z0.h, z1.h +; VBITS_GE_512-NEXT: st1h { z1.h }, p0, [x0] ; VBITS_GE_512-NEXT: ret %op1 = load <32 x half>, ptr %a %op2 = load <32 x half>, ptr %b @@ -88,9 +84,8 @@ define void @select_v64f16(ptr %a, ptr %b) vscale_range(8,0) #0 { ; CHECK-NEXT: ptrue p0.h, vl64 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.h, 
p0/z, z0.h, z1.h -; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h -; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: fcmne p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: st1h { z1.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <64 x half>, ptr %a %op2 = load <64 x half>, ptr %b @@ -106,9 +101,8 @@ define void @select_v128f16(ptr %a, ptr %b) vscale_range(16,0) #0 { ; CHECK-NEXT: ptrue p0.h, vl128 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h -; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h -; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: fcmne p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: st1h { z1.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <128 x half>, ptr %a %op2 = load <128 x half>, ptr %b @@ -149,9 +143,8 @@ define void @select_v8f32(ptr %a, ptr %b) vscale_range(2,0) #0 { ; CHECK-NEXT: ptrue p0.s, vl8 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s -; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s -; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: fcmne p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: st1w { z1.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <8 x float>, ptr %a %op2 = load <8 x float>, ptr %b @@ -170,12 +163,10 @@ define void @select_v16f32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x1, x8, lsl #2] ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] -; VBITS_GE_256-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s -; VBITS_GE_256-NEXT: fcmeq p2.s, p0/z, z2.s, z3.s -; VBITS_GE_256-NEXT: sel z0.s, p1, z0.s, z1.s -; VBITS_GE_256-NEXT: sel z1.s, p2, z2.s, z3.s -; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: fcmne p1.s, p0/z, z0.s, z1.s +; VBITS_GE_256-NEXT: fcmne p0.s, p0/z, z2.s, z3.s +; VBITS_GE_256-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2] +; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: select_v16f32: @@ -183,9 +174,8 @@ define void @select_v16f32(ptr %a, ptr %b) #0 { ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 ; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1w { z1.s }, p0/z, [x1] -; VBITS_GE_512-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s -; VBITS_GE_512-NEXT: sel z0.s, p1, z0.s, z1.s -; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0] +; VBITS_GE_512-NEXT: fcmne p0.s, p0/z, z0.s, z1.s +; VBITS_GE_512-NEXT: st1w { z1.s }, p0, [x0] ; VBITS_GE_512-NEXT: ret %op1 = load <16 x float>, ptr %a %op2 = load <16 x float>, ptr %b @@ -201,9 +191,8 @@ define void @select_v32f32(ptr %a, ptr %b) vscale_range(8,0) #0 { ; CHECK-NEXT: ptrue p0.s, vl32 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s -; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s -; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: fcmne p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: st1w { z1.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x float>, ptr %a %op2 = load <32 x float>, ptr %b @@ -219,9 +208,8 @@ define void @select_v64f32(ptr %a, ptr %b) vscale_range(16,0) #0 { ; CHECK-NEXT: ptrue p0.s, vl64 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s -; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s -; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: fcmne p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: st1w { z1.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <64 x float>, ptr %a %op2 = load <64 x float>, ptr %b @@ -263,9 +251,8 @@ 
define void @select_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 { ; CHECK-NEXT: ptrue p0.d, vl4 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d -; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d -; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: fcmne p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: st1d { z1.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <4 x double>, ptr %a %op2 = load <4 x double>, ptr %b @@ -284,12 +271,10 @@ define void @select_v8f64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x1, x8, lsl #3] ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] -; VBITS_GE_256-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d -; VBITS_GE_256-NEXT: fcmeq p2.d, p0/z, z2.d, z3.d -; VBITS_GE_256-NEXT: sel z0.d, p1, z0.d, z1.d -; VBITS_GE_256-NEXT: sel z1.d, p2, z2.d, z3.d -; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: fcmne p1.d, p0/z, z0.d, z1.d +; VBITS_GE_256-NEXT: fcmne p0.d, p0/z, z2.d, z3.d +; VBITS_GE_256-NEXT: st1d { z1.d }, p1, [x0, x8, lsl #3] +; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: select_v8f64: @@ -297,9 +282,8 @@ define void @select_v8f64(ptr %a, ptr %b) #0 { ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 ; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1d { z1.d }, p0/z, [x1] -; VBITS_GE_512-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d -; VBITS_GE_512-NEXT: sel z0.d, p1, z0.d, z1.d -; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0] +; VBITS_GE_512-NEXT: fcmne p0.d, p0/z, z0.d, z1.d +; VBITS_GE_512-NEXT: st1d { z1.d }, p0, [x0] ; VBITS_GE_512-NEXT: ret %op1 = load <8 x double>, ptr %a %op2 = load <8 x double>, ptr %b @@ -315,9 +299,8 @@ define void @select_v16f64(ptr %a, ptr %b) vscale_range(8,0) #0 { ; CHECK-NEXT: ptrue p0.d, vl16 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d -; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d -; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: fcmne p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: st1d { z1.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <16 x double>, ptr %a %op2 = load <16 x double>, ptr %b @@ -333,9 +316,8 @@ define void @select_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 { ; CHECK-NEXT: ptrue p0.d, vl32 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] -; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d -; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d -; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: fcmne p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: st1d { z1.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x double>, ptr %a %op2 = load <32 x double>, ptr %b diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll index 9cebbc4..291cddf 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll @@ -35,9 +35,8 @@ define void @select_v32i8(ptr %a, ptr %b) vscale_range(2,0) #0 { ; CHECK-NEXT: ptrue p0.b, vl32 ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] ; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b -; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b -; CHECK-NEXT: st1b { z0.b }, p0, [x0] +; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: st1b { z1.b }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, ptr %a %op2 = load <32 x i8>, ptr %b @@ -56,12 +55,10 @@ 
define void @select_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1b { z1.b }, p0/z, [x1, x8] ; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1] -; VBITS_GE_256-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b -; VBITS_GE_256-NEXT: cmpeq p2.b, p0/z, z2.b, z3.b -; VBITS_GE_256-NEXT: sel z0.b, p1, z0.b, z1.b -; VBITS_GE_256-NEXT: sel z1.b, p2, z2.b, z3.b -; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8] -; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0] +; VBITS_GE_256-NEXT: cmpne p1.b, p0/z, z0.b, z1.b +; VBITS_GE_256-NEXT: cmpne p0.b, p0/z, z2.b, z3.b +; VBITS_GE_256-NEXT: st1b { z1.b }, p1, [x0, x8] +; VBITS_GE_256-NEXT: st1b { z3.b }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: select_v64i8: @@ -69,9 +66,8 @@ define void @select_v64i8(ptr %a, ptr %b) #0 { ; VBITS_GE_512-NEXT: ptrue p0.b, vl64 ; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1b { z1.b }, p0/z, [x1] -; VBITS_GE_512-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b -; VBITS_GE_512-NEXT: sel z0.b, p1, z0.b, z1.b -; VBITS_GE_512-NEXT: st1b { z0.b }, p0, [x0] +; VBITS_GE_512-NEXT: cmpne p0.b, p0/z, z0.b, z1.b +; VBITS_GE_512-NEXT: st1b { z1.b }, p0, [x0] ; VBITS_GE_512-NEXT: ret %op1 = load <64 x i8>, ptr %a %op2 = load <64 x i8>, ptr %b @@ -87,9 +83,8 @@ define void @select_v128i8(ptr %a, ptr %b) vscale_range(8,0) #0 { ; CHECK-NEXT: ptrue p0.b, vl128 ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] ; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b -; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b -; CHECK-NEXT: st1b { z0.b }, p0, [x0] +; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: st1b { z1.b }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <128 x i8>, ptr %a %op2 = load <128 x i8>, ptr %b @@ -105,9 +100,8 @@ define void @select_v256i8(ptr %a, ptr %b) vscale_range(16,0) #0 { ; CHECK-NEXT: ptrue p0.b, vl256 ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] ; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b -; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b -; CHECK-NEXT: st1b { z0.b }, p0, [x0] +; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: st1b { z1.b }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <256 x i8>, ptr %a %op2 = load <256 x i8>, ptr %b @@ -148,9 +142,8 @@ define void @select_v16i16(ptr %a, ptr %b) vscale_range(2,0) #0 { ; CHECK-NEXT: ptrue p0.h, vl16 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h -; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h -; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: st1h { z1.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <16 x i16>, ptr %a %op2 = load <16 x i16>, ptr %b @@ -169,12 +162,10 @@ define void @select_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1h { z1.h }, p0/z, [x1, x8, lsl #1] ; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1] -; VBITS_GE_256-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h -; VBITS_GE_256-NEXT: cmpeq p2.h, p0/z, z2.h, z3.h -; VBITS_GE_256-NEXT: sel z0.h, p1, z0.h, z1.h -; VBITS_GE_256-NEXT: sel z1.h, p2, z2.h, z3.h -; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1] -; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0] +; VBITS_GE_256-NEXT: cmpne p1.h, p0/z, z0.h, z1.h +; VBITS_GE_256-NEXT: cmpne p0.h, p0/z, z2.h, z3.h +; VBITS_GE_256-NEXT: st1h { z1.h }, p1, [x0, x8, lsl #1] +; VBITS_GE_256-NEXT: st1h { z3.h }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: select_v32i16: @@ -182,9 +173,8 @@ define void 
@select_v32i16(ptr %a, ptr %b) #0 { ; VBITS_GE_512-NEXT: ptrue p0.h, vl32 ; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1h { z1.h }, p0/z, [x1] -; VBITS_GE_512-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h -; VBITS_GE_512-NEXT: sel z0.h, p1, z0.h, z1.h -; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x0] +; VBITS_GE_512-NEXT: cmpne p0.h, p0/z, z0.h, z1.h +; VBITS_GE_512-NEXT: st1h { z1.h }, p0, [x0] ; VBITS_GE_512-NEXT: ret %op1 = load <32 x i16>, ptr %a %op2 = load <32 x i16>, ptr %b @@ -200,9 +190,8 @@ define void @select_v64i16(ptr %a, ptr %b) vscale_range(8,0) #0 { ; CHECK-NEXT: ptrue p0.h, vl64 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h -; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h -; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: st1h { z1.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <64 x i16>, ptr %a %op2 = load <64 x i16>, ptr %b @@ -218,9 +207,8 @@ define void @select_v128i16(ptr %a, ptr %b) vscale_range(16,0) #0 { ; CHECK-NEXT: ptrue p0.h, vl128 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h -; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h -; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: st1h { z1.h }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <128 x i16>, ptr %a %op2 = load <128 x i16>, ptr %b @@ -261,9 +249,8 @@ define void @select_v8i32(ptr %a, ptr %b) vscale_range(2,0) #0 { ; CHECK-NEXT: ptrue p0.s, vl8 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s -; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s -; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: st1w { z1.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <8 x i32>, ptr %a %op2 = load <8 x i32>, ptr %b @@ -282,12 +269,10 @@ define void @select_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1w { z1.s }, p0/z, [x1, x8, lsl #2] ; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1] -; VBITS_GE_256-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s -; VBITS_GE_256-NEXT: cmpeq p2.s, p0/z, z2.s, z3.s -; VBITS_GE_256-NEXT: sel z0.s, p1, z0.s, z1.s -; VBITS_GE_256-NEXT: sel z1.s, p2, z2.s, z3.s -; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2] -; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0] +; VBITS_GE_256-NEXT: cmpne p1.s, p0/z, z0.s, z1.s +; VBITS_GE_256-NEXT: cmpne p0.s, p0/z, z2.s, z3.s +; VBITS_GE_256-NEXT: st1w { z1.s }, p1, [x0, x8, lsl #2] +; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: select_v16i32: @@ -295,9 +280,8 @@ define void @select_v16i32(ptr %a, ptr %b) #0 { ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 ; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1w { z1.s }, p0/z, [x1] -; VBITS_GE_512-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s -; VBITS_GE_512-NEXT: sel z0.s, p1, z0.s, z1.s -; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x0] +; VBITS_GE_512-NEXT: cmpne p0.s, p0/z, z0.s, z1.s +; VBITS_GE_512-NEXT: st1w { z1.s }, p0, [x0] ; VBITS_GE_512-NEXT: ret %op1 = load <16 x i32>, ptr %a %op2 = load <16 x i32>, ptr %b @@ -313,9 +297,8 @@ define void @select_v32i32(ptr %a, ptr %b) vscale_range(8,0) #0 { ; CHECK-NEXT: ptrue p0.s, vl32 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s -; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s -; 
CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: st1w { z1.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i32>, ptr %a %op2 = load <32 x i32>, ptr %b @@ -331,9 +314,8 @@ define void @select_v64i32(ptr %a, ptr %b) vscale_range(16,0) #0 { ; CHECK-NEXT: ptrue p0.s, vl64 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s -; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s -; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: st1w { z1.s }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <64 x i32>, ptr %a %op2 = load <64 x i32>, ptr %b @@ -375,9 +357,8 @@ define void @select_v4i64(ptr %a, ptr %b) vscale_range(2,0) #0 { ; CHECK-NEXT: ptrue p0.d, vl4 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d -; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d -; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: st1d { z1.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <4 x i64>, ptr %a %op2 = load <4 x i64>, ptr %b @@ -396,12 +377,10 @@ define void @select_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_256-NEXT: ld1d { z1.d }, p0/z, [x1, x8, lsl #3] ; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0] ; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1] -; VBITS_GE_256-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d -; VBITS_GE_256-NEXT: cmpeq p2.d, p0/z, z2.d, z3.d -; VBITS_GE_256-NEXT: sel z0.d, p1, z0.d, z1.d -; VBITS_GE_256-NEXT: sel z1.d, p2, z2.d, z3.d -; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3] -; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0] +; VBITS_GE_256-NEXT: cmpne p1.d, p0/z, z0.d, z1.d +; VBITS_GE_256-NEXT: cmpne p0.d, p0/z, z2.d, z3.d +; VBITS_GE_256-NEXT: st1d { z1.d }, p1, [x0, x8, lsl #3] +; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0] ; VBITS_GE_256-NEXT: ret ; ; VBITS_GE_512-LABEL: select_v8i64: @@ -409,9 +388,8 @@ define void @select_v8i64(ptr %a, ptr %b) #0 { ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 ; VBITS_GE_512-NEXT: ld1d { z0.d }, p0/z, [x0] ; VBITS_GE_512-NEXT: ld1d { z1.d }, p0/z, [x1] -; VBITS_GE_512-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d -; VBITS_GE_512-NEXT: sel z0.d, p1, z0.d, z1.d -; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x0] +; VBITS_GE_512-NEXT: cmpne p0.d, p0/z, z0.d, z1.d +; VBITS_GE_512-NEXT: st1d { z1.d }, p0, [x0] ; VBITS_GE_512-NEXT: ret %op1 = load <8 x i64>, ptr %a %op2 = load <8 x i64>, ptr %b @@ -427,9 +405,8 @@ define void @select_v16i64(ptr %a, ptr %b) vscale_range(8,0) #0 { ; CHECK-NEXT: ptrue p0.d, vl16 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d -; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d -; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: st1d { z1.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <16 x i64>, ptr %a %op2 = load <16 x i64>, ptr %b @@ -445,9 +422,8 @@ define void @select_v32i64(ptr %a, ptr %b) vscale_range(16,0) #0 { ; CHECK-NEXT: ptrue p0.d, vl32 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] -; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d -; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d -; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: st1d { z1.d }, p0, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i64>, ptr %a %op2 = load <32 x i64>, ptr %b diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll 
b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll index c48ee39..2eff6da 100644 --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll @@ -30,9 +30,7 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang ; CHECK-NEXT: // %bb.1: // %vector.body ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: movi v1.2d, #0000000000000000 -; CHECK-NEXT: ldr z4, [x0] -; CHECK-NEXT: ldr z5, [x0, #2, mul vl] -; CHECK-NEXT: ldr z6, [x0, #3, mul vl] +; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: umov w8, v0.b[8] ; CHECK-NEXT: mov v1.b[1], v0.b[1] ; CHECK-NEXT: fmov s2, w8 @@ -62,20 +60,20 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang ; CHECK-NEXT: asr z1.s, z1.s, #31 ; CHECK-NEXT: uunpklo z3.s, z3.h ; CHECK-NEXT: lsl z0.s, z0.s, #31 -; CHECK-NEXT: bic z1.d, z4.d, z1.d +; CHECK-NEXT: cmpne p1.s, p0/z, z1.s, #0 ; CHECK-NEXT: lsl z2.s, z2.s, #31 -; CHECK-NEXT: ldr z4, [x0, #1, mul vl] +; CHECK-NEXT: movi v1.2d, #0000000000000000 ; CHECK-NEXT: asr z0.s, z0.s, #31 -; CHECK-NEXT: str z1, [x0] ; CHECK-NEXT: lsl z3.s, z3.s, #31 ; CHECK-NEXT: asr z2.s, z2.s, #31 -; CHECK-NEXT: bic z0.d, z5.d, z0.d +; CHECK-NEXT: st1w { z1.s }, p1, [x0] +; CHECK-NEXT: cmpne p2.s, p0/z, z0.s, #0 ; CHECK-NEXT: asr z3.s, z3.s, #31 -; CHECK-NEXT: bic z1.d, z4.d, z2.d -; CHECK-NEXT: str z0, [x0, #2, mul vl] -; CHECK-NEXT: bic z3.d, z6.d, z3.d -; CHECK-NEXT: str z1, [x0, #1, mul vl] -; CHECK-NEXT: str z3, [x0, #3, mul vl] +; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0 +; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, #0 +; CHECK-NEXT: st1w { z1.s }, p2, [x0, #2, mul vl] +; CHECK-NEXT: st1w { z1.s }, p3, [x0, #3, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [x0, #1, mul vl] ; CHECK-NEXT: .LBB1_2: // %exit ; CHECK-NEXT: ret %broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll index 4b93900..8750867 100644 --- a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll +++ b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll @@ -49,7 +49,7 @@ define half @fadda_nxv6f16(<vscale x 6 x half> %v, half %s) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov w8, #32768 // =0x8000 ; CHECK-NEXT: ptrue p0.d @@ -73,7 +73,7 @@ define half @fadda_nxv10f16(<vscale x 10 x half> %v, half %s) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: // kill: def $h2 killed $h2 def $z2 diff --git a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll index 1b6b92a..4374409 100644 --- a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll +++ b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll @@ -254,7 +254,7 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-4476578029606273024 // =0xc1e0000000000000 ; CHECK-NEXT: ptrue p0.d @@ -341,7 +341,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-4548635623644200960 // =0xc0e0000000000000 ; CHECK-NEXT: ptrue p0.d diff --git a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll index b3aefb8..1df2819 100644 --- a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll +++ b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll @@ -208,7 +208,7 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) { ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #281474974613504 // =0xffffffe00000 @@ -275,7 +275,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) { ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #281337537757184 // =0xffe000000000 diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll index 7f558e3..8ca005a 100644 --- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll @@ -588,7 +588,7 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val, ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1 ; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1 diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll index dcf3317..73c783d 100644 --- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll @@ -186,7 +186,7 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, ptr %out) uwt ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: str z0, [sp] ; CHECK-NEXT: str q1, [sp, #32] @@ -229,7 +229,7 @@ define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) uwtable { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [sp, #16] ; CHECK-NEXT: ldr z0, [sp, #1, mul vl] @@ -896,7 +896,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_0(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -923,7 +923,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_1(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -950,7 +950,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_2(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -977,7 +977,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_3(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1004,7 +1004,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_4(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1031,7 +1031,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_5(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1058,7 +1058,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_6(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1085,7 +1085,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_7(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1112,7 +1112,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_8(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1139,7 +1139,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_9(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1166,7 +1166,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_10(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1193,7 +1193,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_11(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1220,7 +1220,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_12(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1247,7 +1247,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_13(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1274,7 +1274,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_14(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1301,7 +1301,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_15(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b diff --git a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir index 6d09425..2a7e8a43c 100644 --- a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir +++ b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir @@ -41,13 +41,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) @@ -64,7 +64,7 @@ body: | ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s64) from %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy 
ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -100,13 +100,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) @@ -123,7 +123,7 @@ body: | ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s64) from %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -159,44 +159,44 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; 
CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -231,44 +231,44 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, 
-16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_IMM renamable $p0, killed $x8, -8, implicit 
$ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 diff --git a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir index 1352b9d..863d4d1 100644 --- a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir +++ b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir @@ -41,13 +41,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, 7 :: (load (s16) from %ir.object) ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, 7 :: (load (s32) from %ir.object, align 8) @@ -56,7 +56,7 @@ body: | ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s16) into %ir.object, align 8) ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s32) into %ir.object, align 8) ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -84,13 +84,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 
0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, -8 :: (load (s16) from %ir.object) ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, -8 :: (load (s32) from %ir.object) @@ -99,7 +99,7 @@ body: | ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s16) into %ir.object, align 8) ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s32) into %ir.object, align 8) ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -127,30 +127,30 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, 7 :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, killed $x8, 7 :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, killed $x8, 7 :: (load (s32) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1D_ZRI renamable $p0, killed $x8, 7 :: (load (s64) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1B_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s8) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s16) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s32) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; 
CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -178,30 +178,30 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, -8 :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, killed $x8, -8 :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, killed $x8, -8 :: (load (s32) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1D_ZRI renamable $p0, killed $x8, -8 :: (load (s64) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1B_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s8) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s16) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s32) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 diff --git a/llvm/test/CodeGen/AArch64/sve-llrint.ll b/llvm/test/CodeGen/AArch64/sve-llrint.ll index b0198cf..12d4918 100644 --- a/llvm/test/CodeGen/AArch64/sve-llrint.ll +++ b/llvm/test/CodeGen/AArch64/sve-llrint.ll @@ -88,7 +88,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 
0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z1.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h @@ -161,11 +161,11 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z2.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w8, #64511 // =0xfbff @@ -299,16 +299,16 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 
0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z4.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w9, #64511 // =0xfbff @@ -614,7 +614,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z2.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s @@ -684,11 +684,11 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z4.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: mov w8, #-553648128 // =0xdf000000 @@ -818,16 +818,16 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: 
.cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z24.d, z0.s ; CHECK-NEXT: uunpkhi z25.d, z0.s ; CHECK-NEXT: mov w9, #-553648128 // =0xdf000000 @@ -1125,7 +1125,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 @@ -1190,10 +1190,10 @@ define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) { ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 
0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 ; CHECK-NEXT: mov z26.d, #0x8000000000000000 @@ -1312,16 +1312,16 @@ define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ldr z2, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/sve-lrint.ll b/llvm/test/CodeGen/AArch64/sve-lrint.ll index aa586390..58ac53d 100644 --- a/llvm/test/CodeGen/AArch64/sve-lrint.ll +++ b/llvm/test/CodeGen/AArch64/sve-lrint.ll @@ -89,7 +89,7 @@ define <vscale x 8 x iXLen> @lrint_v8f16(<vscale x 8 x half> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; 
CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z1.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h @@ -162,11 +162,11 @@ define <vscale x 16 x iXLen> @lrint_v16f16(<vscale x 16 x half> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z2.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w8, #64511 // =0xfbff @@ -300,16 +300,16 @@ define <vscale x 32 x iXLen> @lrint_v32f16(<vscale x 32 x half> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 
0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z4.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w9, #64511 // =0xfbff @@ -615,7 +615,7 @@ define <vscale x 8 x iXLen> @lrint_v8f32(<vscale x 8 x float> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z2.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s @@ -685,11 +685,11 @@ define <vscale x 16 x iXLen> @lrint_v16f32(<vscale x 16 x float> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z4.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: mov w8, #-553648128 // =0xdf000000 @@ -819,16 +819,16 @@ define <vscale x 32 x iXLen> @lrint_v32f32(<vscale x 32 x float> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, 
-16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z24.d, z0.s ; CHECK-NEXT: uunpkhi z25.d, z0.s ; CHECK-NEXT: mov w9, #-553648128 // =0xdf000000 @@ -1126,7 +1126,7 @@ define <vscale x 8 x iXLen> @lrint_v8f64(<vscale x 8 x double> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 @@ -1191,10 +1191,10 @@ define <vscale x 16 x iXLen> @lrint_v16f64(<vscale x 16 x double> %x) { ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 
0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 ; CHECK-NEXT: mov z26.d, #0x8000000000000000 @@ -1313,16 +1313,16 @@ define <vscale x 32 x iXLen> @lrint_v32f64(<vscale x 32 x double> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ldr z2, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll index 6e08606..24df76b 100644 --- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll @@ -53,7 +53,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1> ; CHECK-NEXT: 
.cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill @@ -137,7 +137,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1> ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill diff --git a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll index 9a4231a..0bc8cb8 100644 --- a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll @@ -20,7 +20,7 @@ define i8 @split_extract_32i8_idx(<vscale x 32 x i8> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov w9, w0 @@ -43,7 +43,7 @@ define i16 @split_extract_16i16_idx(<vscale x 16 x i16> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, w0 @@ -66,7 +66,7 @@ define i32 @split_extract_8i32_idx(<vscale x 8 x i32> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, w0 @@ -89,7 +89,7 @@ define i64 @split_extract_8i64_idx(<vscale x 8 x i64> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, w0 @@ -134,7 +134,7 @@ define i16 @split_extract_16i16(<vscale x 16 x i16> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, #128 // =0x80 @@ -157,7 +157,7 @@ define i32 @split_extract_16i32(<vscale x 16 x i32> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, #34464 // =0x86a0 @@ -183,7 +183,7 @@ define i64 @split_extract_4i64(<vscale x 4 x i64> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cntw x8 ; CHECK-NEXT: mov w9, #10 // =0xa diff --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll index d7ed42d..4ed59bc 100644 --- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll @@ -21,7 +21,7 @@ define <vscale x 32 x i8> @split_insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt, ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov x9, sp @@ -45,7 +45,7 @@ define <vscale x 8 x float> @split_insert_8f32_idx(<vscale x 8 x float> %a, floa ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov x9, sp @@ -69,7 +69,7 @@ define <vscale x 8 x i64> @split_insert_8i64_idx(<vscale x 8 x i64> %a, i64 %elt ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov x9, sp @@ -130,7 +130,7 @@ define <vscale x 32 x i16> @split_insert_32i16(<vscale x 32 x i16> %a, i16 %elt) ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov w9, #128 // =0x80 @@ -159,7 +159,7 @@ define <vscale x 8 x i32> @split_insert_8i32(<vscale x 8 x i32> %a, i32 %elt) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, #16960 // =0x4240 diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll index c5cf459..e0da9b57 100644 --- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll +++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll @@ -16,7 +16,7 @@ define i32 @csr_d8_allocnxv4i32i32f64(double %d) "aarch64_pstate_sm_compatible" ; CHECK-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -8 ; CHECK-NEXT: .cfi_offset b8, -16 ; CHECK-NEXT: mov z1.s, #0 // =0x0 @@ -219,7 +219,7 @@ define i32 @csr_d8_allocnxv4i32i32f64_stackargsi32f64(double %d0, double %d1, do ; CHECK-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -8 ; CHECK-NEXT: .cfi_offset b8, -16 ; CHECK-NEXT: mov z1.s, #0 // =0x0 @@ -266,7 +266,7 @@ define i32 @svecc_z8_allocnxv4i32i32f64_fp(double %d, <vscale x 4 x i32> %v) "aa ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -310,7 +310,7 @@ define i32 @svecc_z8_allocnxv4i32i32f64_stackargsi32_fp(double %d, i32 %i0, i32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; 
CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -383,7 +383,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK-NEXT: .cfi_offset w30, -40 ; CHECK-NEXT: .cfi_offset w29, -48 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -412,14 +412,14 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; CHECK-NEXT: mov x8, x0 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP diff --git 
a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll index ec0693a..c43e929 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll @@ -194,14 +194,14 @@ define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, <8 x i1> %mask define void @select_v16f16(ptr %a, ptr %b) { ; CHECK-LABEL: select_v16f16: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q3, [x1] +; CHECK-NEXT: ldp q3, q0, [x1] ; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: ldp q1, q2, [x0] -; CHECK-NEXT: fcmeq p1.h, p0/z, z1.h, z0.h -; CHECK-NEXT: fcmeq p0.h, p0/z, z2.h, z3.h -; CHECK-NEXT: mov z0.h, p1/m, z1.h -; CHECK-NEXT: sel z1.h, p0, z2.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q1, [x0] +; CHECK-NEXT: mov x8, #8 // =0x8 +; CHECK-NEXT: fcmne p1.h, p0/z, z1.h, z0.h +; CHECK-NEXT: fcmne p0.h, p0/z, z2.h, z3.h +; CHECK-NEXT: st1h { z0.h }, p1, [x0, x8, lsl #1] +; CHECK-NEXT: st1h { z3.h }, p0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: select_v16f16: @@ -429,14 +429,14 @@ define <4 x float> @select_v4f32(<4 x float> %op1, <4 x float> %op2, <4 x i1> %m define void @select_v8f32(ptr %a, ptr %b) { ; CHECK-LABEL: select_v8f32: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q3, [x1] +; CHECK-NEXT: ldp q3, q0, [x1] ; CHECK-NEXT: ptrue p0.s, vl4 -; CHECK-NEXT: ldp q1, q2, [x0] -; CHECK-NEXT: fcmeq p1.s, p0/z, z1.s, z0.s -; CHECK-NEXT: fcmeq p0.s, p0/z, z2.s, z3.s -; CHECK-NEXT: mov z0.s, p1/m, z1.s -; CHECK-NEXT: sel z1.s, p0, z2.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q1, [x0] +; CHECK-NEXT: mov x8, #4 // =0x4 +; CHECK-NEXT: fcmne p1.s, p0/z, z1.s, z0.s +; CHECK-NEXT: fcmne p0.s, p0/z, z2.s, z3.s +; CHECK-NEXT: st1w { z0.s }, p1, [x0, x8, lsl #2] +; CHECK-NEXT: st1w { z3.s }, p0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: select_v8f32: @@ -553,14 +553,14 @@ define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, <2 x i1> define void @select_v4f64(ptr %a, ptr %b) { ; CHECK-LABEL: select_v4f64: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q3, [x1] +; CHECK-NEXT: ldp q3, q0, [x1] ; CHECK-NEXT: ptrue p0.d, vl2 -; CHECK-NEXT: ldp q1, q2, [x0] -; CHECK-NEXT: fcmeq p1.d, p0/z, z1.d, z0.d -; CHECK-NEXT: fcmeq p0.d, p0/z, z2.d, z3.d -; CHECK-NEXT: mov z0.d, p1/m, z1.d -; CHECK-NEXT: sel z1.d, p0, z2.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q1, [x0] +; CHECK-NEXT: mov x8, #2 // =0x2 +; CHECK-NEXT: fcmne p1.d, p0/z, z1.d, z0.d +; CHECK-NEXT: fcmne p0.d, p0/z, z2.d, z3.d +; CHECK-NEXT: st1d { z0.d }, p1, [x0, x8, lsl #3] +; CHECK-NEXT: st1d { z3.d }, p0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: select_v4f64: diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll index 3970113..3787b23 100644 --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll @@ -288,14 +288,14 @@ define <16 x i8> @select_v16i8(<16 x i8> %op1, <16 x i8> %op2, <16 x i1> %mask) define void @select_v32i8(ptr %a, ptr %b) { ; CHECK-LABEL: select_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q3, [x1] +; CHECK-NEXT: ldp q3, q0, [x1] ; CHECK-NEXT: ptrue p0.b, vl16 -; CHECK-NEXT: ldp q1, q2, [x0] -; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z0.b -; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z3.b -; 
CHECK-NEXT: mov z0.b, p1/m, z1.b -; CHECK-NEXT: sel z1.b, p0, z2.b, z3.b -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q1, [x0] +; CHECK-NEXT: mov w8, #16 // =0x10 +; CHECK-NEXT: cmpne p1.b, p0/z, z1.b, z0.b +; CHECK-NEXT: cmpne p0.b, p0/z, z2.b, z3.b +; CHECK-NEXT: st1b { z0.b }, p1, [x0, x8] +; CHECK-NEXT: st1b { z3.b }, p0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: select_v32i8: @@ -692,14 +692,14 @@ define <8 x i16> @select_v8i16(<8 x i16> %op1, <8 x i16> %op2, <8 x i1> %mask) { define void @select_v16i16(ptr %a, ptr %b) { ; CHECK-LABEL: select_v16i16: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q3, [x1] +; CHECK-NEXT: ldp q3, q0, [x1] ; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: ldp q1, q2, [x0] -; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z0.h -; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z3.h -; CHECK-NEXT: mov z0.h, p1/m, z1.h -; CHECK-NEXT: sel z1.h, p0, z2.h, z3.h -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q1, [x0] +; CHECK-NEXT: mov x8, #8 // =0x8 +; CHECK-NEXT: cmpne p1.h, p0/z, z1.h, z0.h +; CHECK-NEXT: cmpne p0.h, p0/z, z2.h, z3.h +; CHECK-NEXT: st1h { z0.h }, p1, [x0, x8, lsl #1] +; CHECK-NEXT: st1h { z3.h }, p0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: select_v16i16: @@ -906,14 +906,14 @@ define <4 x i32> @select_v4i32(<4 x i32> %op1, <4 x i32> %op2, <4 x i1> %mask) { define void @select_v8i32(ptr %a, ptr %b) { ; CHECK-LABEL: select_v8i32: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q3, [x1] +; CHECK-NEXT: ldp q3, q0, [x1] ; CHECK-NEXT: ptrue p0.s, vl4 -; CHECK-NEXT: ldp q1, q2, [x0] -; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z0.s -; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z3.s -; CHECK-NEXT: mov z0.s, p1/m, z1.s -; CHECK-NEXT: sel z1.s, p0, z2.s, z3.s -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q1, [x0] +; CHECK-NEXT: mov x8, #4 // =0x4 +; CHECK-NEXT: cmpne p1.s, p0/z, z1.s, z0.s +; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, z3.s +; CHECK-NEXT: st1w { z0.s }, p1, [x0, x8, lsl #2] +; CHECK-NEXT: st1w { z3.s }, p0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: select_v8i32: @@ -1039,14 +1039,14 @@ define <2 x i64> @select_v2i64(<2 x i64> %op1, <2 x i64> %op2, <2 x i1> %mask) { define void @select_v4i64(ptr %a, ptr %b) { ; CHECK-LABEL: select_v4i64: ; CHECK: // %bb.0: -; CHECK-NEXT: ldp q0, q3, [x1] +; CHECK-NEXT: ldp q3, q0, [x1] ; CHECK-NEXT: ptrue p0.d, vl2 -; CHECK-NEXT: ldp q1, q2, [x0] -; CHECK-NEXT: cmpeq p1.d, p0/z, z1.d, z0.d -; CHECK-NEXT: cmpeq p0.d, p0/z, z2.d, z3.d -; CHECK-NEXT: mov z0.d, p1/m, z1.d -; CHECK-NEXT: sel z1.d, p0, z2.d, z3.d -; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q1, [x0] +; CHECK-NEXT: mov x8, #2 // =0x2 +; CHECK-NEXT: cmpne p1.d, p0/z, z1.d, z0.d +; CHECK-NEXT: cmpne p0.d, p0/z, z2.d, z3.d +; CHECK-NEXT: st1d { z0.d }, p1, [x0, x8, lsl #3] +; CHECK-NEXT: st1d { z3.d }, p0, [x0] ; CHECK-NEXT: ret ; ; NONEON-NOSVE-LABEL: select_v4i64: diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll index 0ec6538..50580cb 100644 --- a/llvm/test/CodeGen/AArch64/sve-trunc.ll +++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll @@ -115,7 +115,7 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 
0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: and z7.d, z7.d, #0x1 ; CHECK-NEXT: and z6.d, z6.d, #0x1 diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll index 8a504cd..198e0a3 100644 --- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll +++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll @@ -105,7 +105,7 @@ define <vscale x 8 x i32> @test_compress_large(<vscale x 8 x i32> %vec, <vscale ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: cnth x9 diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll index 0eacac2..1dbd7dd 100644 --- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll +++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll @@ -276,7 +276,7 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken(target("aarch64.svcount") %pn, ptr ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0] @@ -298,7 +298,7 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken_scalar(target("aarch64.svcount") % ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0, x1] @@ -585,7 +585,7 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken(target("aarch64.svcount") %pn, pt ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0] @@ -607,7 +607,7 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken_scalar(target("aarch64.svcount") ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0, x1, lsl #1] @@ -896,7 +896,7 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken(target("aarch64.svcount") %pn, ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0] @@ -918,7 +918,7 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken_scalar(target("aarch64.svcount" ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0, x1, lsl #2] @@ -1205,7 +1205,7 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken(target("aarch64.svcount") %pn, ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0] @@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken_scalar(target("aarch64.svcount" ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0, x1, lsl #3] diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll index 822be14..7e1f63d 100644 --- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll +++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll @@ -13,7 +13,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -42,27 +42,27 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 
0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; CHECK-NEXT: .cfi_remember_state ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .Ltmp0: +; CHECK-NEXT: .Ltmp0: // EH_LABEL ; CHECK-NEXT: bl may_throw_sve -; CHECK-NEXT: .Ltmp1: +; CHECK-NEXT: .Ltmp1: // EH_LABEL ; CHECK-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: b .LBB0_1 ; CHECK-NEXT: .LBB0_1: // %.Lcontinue ; CHECK-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -108,10 +108,10 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB0_2: // %.Lunwind ; CHECK-NEXT: .cfi_restore_state -; CHECK-NEXT: .Ltmp2: +; CHECK-NEXT: .Ltmp2: // EH_LABEL ; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -165,7 +165,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: .cfi_offset w30, -8 ; GISEL-NEXT: .cfi_offset w29, -16 ; GISEL-NEXT: addvl sp, sp, #-18 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; GISEL-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; GISEL-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -194,27 +194,27 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 
0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; GISEL-NEXT: addvl sp, sp, #-2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; GISEL-NEXT: .cfi_remember_state ; GISEL-NEXT: str z0, [sp] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp0: +; GISEL-NEXT: .Ltmp0: // EH_LABEL ; GISEL-NEXT: bl may_throw_sve -; GISEL-NEXT: .Ltmp1: +; GISEL-NEXT: .Ltmp1: // EH_LABEL ; GISEL-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: b .LBB0_1 ; GISEL-NEXT: .LBB0_1: // %.Lcontinue ; GISEL-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -260,10 +260,10 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: ret ; GISEL-NEXT: .LBB0_2: // %.Lunwind ; GISEL-NEXT: .cfi_restore_state -; GISEL-NEXT: .Ltmp2: +; GISEL-NEXT: .Ltmp2: // EH_LABEL ; GISEL-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; 
GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -355,9 +355,9 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; CHECK-NEXT: .cfi_offset b23, -272 ; CHECK-NEXT: .cfi_remember_state ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .Ltmp3: +; CHECK-NEXT: .Ltmp3: // EH_LABEL ; CHECK-NEXT: bl may_throw_neon -; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: .Ltmp4: // EH_LABEL ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: b .LBB1_1 ; CHECK-NEXT: .LBB1_1: // %.Lcontinue @@ -394,7 +394,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: // %.Lunwind ; CHECK-NEXT: .cfi_restore_state -; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: .Ltmp5: // EH_LABEL ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload ; CHECK-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload @@ -462,10 +462,10 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; GISEL-NEXT: .cfi_offset b23, -272 ; GISEL-NEXT: .cfi_remember_state ; GISEL-NEXT: str q0, [sp] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp3: +; GISEL-NEXT: .Ltmp3: // EH_LABEL ; GISEL-NEXT: bl may_throw_neon ; GISEL-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp4: +; GISEL-NEXT: .Ltmp4: // EH_LABEL ; GISEL-NEXT: b .LBB1_1 ; GISEL-NEXT: .LBB1_1: // %.Lcontinue ; GISEL-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload @@ -501,7 +501,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; GISEL-NEXT: ret ; GISEL-NEXT: .LBB1_2: // %.Lunwind ; GISEL-NEXT: .cfi_restore_state -; GISEL-NEXT: .Ltmp5: +; GISEL-NEXT: .Ltmp5: // EH_LABEL ; GISEL-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; GISEL-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload ; GISEL-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/xray-custom-log.ll b/llvm/test/CodeGen/AArch64/xray-custom-log.ll index fd8ddf9..2432808 100644 --- a/llvm/test/CodeGen/AArch64/xray-custom-log.ll +++ b/llvm/test/CodeGen/AArch64/xray-custom-log.ll @@ -1,7 +1,5 @@ ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s ; RUN: llc -mtriple=arm64-apple-darwin < %s | FileCheck %s --check-prefix=MACHO -; RUN: llc -filetype=obj -mtriple=aarch64 %s -o %t -; RUN: llvm-dwarfdump -debug-info %t | FileCheck %s --check-prefix=DBG ; MACHO: bl ___xray_CustomEvent ; MACHO: bl ___xray_CustomEvent @@ -92,18 +90,6 @@ entry: ; CHECK-NEXT: .byte 0x02 ; CHECK-NEXT: .zero 13 -;; Construct call site entries for PATCHABLE_EVENT_CALL. 
-; DBG: DW_TAG_subprogram -; DBG: DW_AT_name -; DBG-SAME: ("customevent") -; DBG: DW_TAG_call_site -; DBG-NEXT: DW_AT_call_target (DW_OP_reg0 {{.*}}) -; DBG-NEXT: DW_AT_call_return_pc -; DBG-EMPTY: -; DBG: DW_TAG_call_site -; DBG-NEXT: DW_AT_call_target (DW_OP_reg2 {{.*}}) -; DBG-NEXT: DW_AT_call_return_pc - declare void @llvm.xray.customevent(ptr, i64) declare void @llvm.xray.typedevent(i64, ptr, i64) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll index a066b15..e6a8bac 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll @@ -1917,8 +1917,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX9-NEXT: s_mov_b32 s0, 0 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; GFX9-NEXT: v_mov_b32_e32 v0, 15 -; GFX9-NEXT: s_movk_i32 s0, 0x3e84 +; GFX9-NEXT: s_add_i32 s0, s0, 4 ; GFX9-NEXT: scratch_store_dword off, v0, s0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -1933,7 +1934,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; GFX10-NEXT: v_mov_b32_e32 v0, 13 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_movk_i32 s0, 0x3e84 +; GFX10-NEXT: s_movk_i32 s0, 0x3e80 +; GFX10-NEXT: s_add_i32 s0, s0, 4 ; GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -1945,10 +1947,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX942-LABEL: store_load_large_imm_offset_kernel: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: v_mov_b32_e32 v0, 13 +; GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v0, 15 -; GFX942-NEXT: s_movk_i32 s0, 0x3e84 +; GFX942-NEXT: s_add_i32 s0, s0, 4 ; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -1958,7 +1961,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX11-LABEL: store_load_large_imm_offset_kernel: ; GFX11: ; %bb.0: ; %bb ; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; GFX11-NEXT: s_movk_i32 s0, 0x3e84 +; GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_add_i32 s0, s0, 4 ; GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -1986,8 +1991,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX9-NEXT: s_mov_b32 s0, 0 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) +; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2002,7 +2008,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15 -; 
UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2014,10 +2021,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_kernel: ; UNALIGNED_GFX942: ; %bb.0: ; %bb ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2027,7 +2035,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX11-LABEL: store_load_large_imm_offset_kernel: ; UNALIGNED_GFX11: ; %bb.0: ; %bb ; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc ; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -2061,11 +2071,13 @@ define void @store_load_large_imm_offset_foo() { ; GFX9-LABEL: store_load_large_imm_offset_foo: ; GFX9: ; %bb.0: ; %bb ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; GFX9-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-NEXT: s_add_i32 s1, s32, s0 ; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, 15 -; GFX9-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX9-NEXT: s_add_i32 s0, s1, 4 ; GFX9-NEXT: scratch_store_dword off, v0, s0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2076,8 +2088,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10: ; %bb.0: ; %bb ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-NEXT: s_movk_i32 s0, 0x3e80 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX10-NEXT: s_add_i32 s1, s32, s0 +; GFX10-NEXT: s_add_i32 s0, s1, 4 ; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2089,11 +2103,13 @@ define void @store_load_large_imm_offset_foo() { ; GFX942-LABEL: store_load_large_imm_offset_foo: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; GFX942-NEXT: v_mov_b32_e32 v0, 13 +; GFX942-NEXT: s_add_i32 s1, s32, s0 ; GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v0, 15 -; GFX942-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX942-NEXT: s_add_i32 s0, s1, 4 ; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2104,7 +2120,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX11: ; %bb.0: ; %bb ; 
GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; GFX11-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_add_i32 s1, s32, s0 +; GFX11-NEXT: s_add_i32 s0, s1, 4 ; GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -2133,11 +2152,13 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX9-LABEL: store_load_large_imm_offset_foo: ; UNALIGNED_GFX9: ; %bb.0: ; %bb ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX9-NEXT: s_add_i32 s1, s32, s0 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2148,8 +2169,10 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX10: ; %bb.0: ; %bb ; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15 -; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX10-NEXT: s_add_i32 s1, s32, s0 +; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2161,11 +2184,13 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_foo: ; UNALIGNED_GFX942: ; %bb.0: ; %bb ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX942-NEXT: s_add_i32 s1, s32, s0 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2176,7 +2201,10 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX11: ; %bb.0: ; %bb ; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; UNALIGNED_GFX11-NEXT: s_add_i32 s1, s32, s0 +; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc ; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll index 7dce9ac..4c40009 
100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll @@ -997,8 +997,8 @@ define <33 x i32> @v33i32_func_v33i32_i32(ptr addrspace(1) %p, i32 %idx) #0 { ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY3]](s32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256 - ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV]], [[MUL]](s64) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = nsw G_MUL [[SEXT]], [[C]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p1) = nusw inbounds G_PTR_ADD [[MV]], [[MUL]](s64) ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(p1) = COPY [[PTR_ADD]](p1) ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[COPY4]](p1) :: (load (<33 x s32>) from %ir.gep, align 256, addrspace 1) ; CHECK-NEXT: G_STORE [[LOAD]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir index 6a4522f..d69a3e1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir @@ -141,11 +141,11 @@ body: | ; SIVI-NEXT: {{ $}} ; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 ; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p5) = COPY $vgpr0 + ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p5) ; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 68 ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64) ; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), addrspace 4) - ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p5) ; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32) ; SIVI-NEXT: [[C1:%[0-9]+]]:_(p5) = G_CONSTANT i32 -1 ; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 @@ -157,9 +157,9 @@ body: | ; GFX9: liveins: $vgpr0 ; GFX9-NEXT: {{ $}} ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0 + ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5) ; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_private_base ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64) - ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5) ; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32) ; GFX9-NEXT: [[C:%[0-9]+]]:_(p5) = G_CONSTANT i32 -1 ; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 @@ -210,11 +210,11 @@ body: | ; SIVI-NEXT: {{ $}} ; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 ; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr0 + ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p3) ; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64) ; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4) - ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p3) ; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32) ; SIVI-NEXT: [[C1:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1 ; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 @@ 
-226,9 +226,9 @@ body: | ; GFX9: liveins: $vgpr0 ; GFX9-NEXT: {{ $}} ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0 + ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p3) ; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64) - ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p3) ; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32) ; GFX9-NEXT: [[C:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1 ; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 @@ -354,20 +354,20 @@ body: | ; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 ; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr0_vgpr1 ; SIVI-NEXT: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[COPY1]](<2 x p3>) + ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3) ; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64) ; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4) - ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3) ; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32) ; SIVI-NEXT: [[C1:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1 ; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 ; SIVI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](p3), [[C1]] ; SIVI-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C2]] + ; SIVI-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; SIVI-NEXT: [[COPY3:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY3]], [[C]](s64) ; SIVI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4) - ; SIVI-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; SIVI-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT1]](s32), [[LOAD1]](s32) ; SIVI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](p3), [[C1]] ; SIVI-NEXT: [[SELECT1:%[0-9]+]]:_(p0) = G_SELECT [[ICMP1]](s1), [[MV1]], [[C2]] @@ -379,17 +379,17 @@ body: | ; GFX9-NEXT: {{ $}} ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr0_vgpr1 ; GFX9-NEXT: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[COPY]](<2 x p3>) + ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3) ; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base ; GFX9-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64) - ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3) ; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV3]](s32) ; GFX9-NEXT: [[C:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1 ; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 ; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](p3), [[C]] ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C1]] + ; GFX9-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; GFX9-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base ; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_1]](s64) - ; GFX9-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; GFX9-NEXT: [[MV1:%[0-9]+]]:_(p0) = 
G_MERGE_VALUES [[PTRTOINT1]](s32), [[UV5]](s32) ; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](p3), [[C]] ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(p0) = G_SELECT [[ICMP1]](s1), [[MV1]], [[C1]] @@ -506,19 +506,19 @@ body: | ; SIVI-NEXT: {{ $}} ; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 ; SIVI-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0 + ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5) ; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 68 ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), addrspace 4) - ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5) ; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32) ; SIVI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p0) ; ; GFX9-LABEL: name: test_addrspacecast_p5_fi_to_p0 ; GFX9: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0 + ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5) ; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_private_base ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64) - ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5) ; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32) ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p0) %0:_(p5) = G_FRAME_INDEX %stack.0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir index c231aa8..ee57b72 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir @@ -1090,3 +1090,24 @@ body: | $vgpr9_vgpr10_vgpr11 = COPY %8 ... + +--- +name: test_unmerge_through_copy +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: test_unmerge_through_copy + ; CHECK: liveins: $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]] + ; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s16), %2:_(s16) = G_UNMERGE_VALUES %0:_(s32) + %3:_(s16) = COPY %1:_(s16) + %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %3:_(s16) + %6:_(s32) = G_ZEXT %4:_(s8) + $vgpr0 = COPY %6:_(s32) +... diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll new file mode 100644 index 0000000..4b6375c --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll @@ -0,0 +1,134 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s + +; Test code sequences for addrspacecast with globally addressable scratch. 
+ +target triple = "amdgcn-amd-amdhsa" + +define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) { +; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0 +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_lshlrev_b32 v1, 20, v0 +; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s2, -1 +; GFX1250-SDAG-NEXT: s_cselect_b32 vcc_lo, -1, 0 +; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1 +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo +; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS +; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: use_private_to_flat_addrspacecast: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo +; GFX1250-GISEL-NEXT: v_mbcnt_lo_u32_b32 v2, -1, 0 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, -1 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, s2, v0 +; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v2, 20, v2 +; GFX1250-GISEL-NEXT: s_cselect_b32 s0, 1, 0 +; GFX1250-GISEL-NEXT: s_and_b32 s0, 1, s0 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v2, v1, vcc_lo +; GFX1250-GISEL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1 +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo +; GFX1250-GISEL-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS +; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0 +; GFX1250-GISEL-NEXT: s_endpgm + %stof = addrspacecast ptr addrspace(5) %ptr to ptr + store volatile i32 0, ptr %stof + ret void +} + +define amdgpu_kernel void @use_private_to_flat_addrspacecast_nonnull(ptr addrspace(5) %ptr) { +; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast_nonnull: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 20, v0 +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s0 +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] +; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS +; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: use_private_to_flat_addrspacecast_nonnull: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo +; GFX1250-GISEL-NEXT: v_mbcnt_lo_u32_b32 v2, -1, 0 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v2, 20, v2 +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, s2, v0 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v2, v1, vcc_lo +; GFX1250-GISEL-NEXT: flat_store_b32 v[0:1], v3 scope:SCOPE_SYS +; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0 +; GFX1250-GISEL-NEXT: s_endpgm + %stof = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %ptr) + store volatile i32 0, ptr %stof + ret void +} + +define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) { +; GFX1250-LABEL: use_flat_to_private_addrspacecast: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1250-NEXT: s_cselect_b32 s0, s2, -1 +; GFX1250-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_storecnt 0x0 +; GFX1250-NEXT: s_endpgm + %ftos = addrspacecast ptr %ptr to ptr addrspace(5) + store volatile i32 0, ptr addrspace(5) %ftos + ret void +} + +define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) { +; GFX1250-SDAG-LABEL: use_flat_to_private_addrspacecast_nonnull: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX1250-SDAG-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS +; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: use_flat_to_private_addrspacecast_nonnull: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX1250-GISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS +; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0 +; GFX1250-GISEL-NEXT: s_endpgm + %ftos = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr %ptr) + store volatile i32 0, ptr addrspace(5) %ftos + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll index 2ad7818..243f0ed 100644 --- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll +++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll @@ -25,8 +25,11 @@ define amdgpu_kernel void @uniform_conditional_max_short_forward_branch(ptr addr ; GCN-NEXT: s_load_b32 s0, s[4:5], 0x2c ; GCN-NEXT: s_wait_kmcnt 0x0 ; GCN-NEXT: s_cmp_eq_u32 s0, 0 -; GCN-NEXT: s_cbranch_scc1 .LBB0_2 -; GCN-NEXT: ; %bb.1: ; %bb2 +; GCN-NEXT: s_cbranch_scc0 .LBB0_1 +; GCN-NEXT: ; %bb.3: ; %bb +; GCN-NEXT: s_add_pc_i64 .LBB0_2-.Lpost_addpc0 +; GCN-NEXT: .Lpost_addpc0: +; GCN-NEXT: .LBB0_1: ; %bb2 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: v_nop_e64 ; GCN-NEXT: v_nop_e64 @@ -64,8 +67,8 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(ptr addrs ; GCN-NEXT: s_cmp_eq_u32 s0, 0 ; GCN-NEXT: s_cbranch_scc0 .LBB1_1 ; GCN-NEXT: ; %bb.3: ; 
%bb0 -; GCN-NEXT: s_add_pc_i64 .LBB1_2-.Lpost_addpc0 -; GCN-NEXT: .Lpost_addpc0: +; GCN-NEXT: s_add_pc_i64 .LBB1_2-.Lpost_addpc1 +; GCN-NEXT: .Lpost_addpc1: ; GCN-NEXT: .LBB1_1: ; %bb2 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: v_nop_e64 @@ -106,8 +109,8 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(ptr ; GCN-NEXT: s_cmp_eq_f32 s0, 0 ; GCN-NEXT: s_cbranch_scc0 .LBB2_1 ; GCN-NEXT: ; %bb.3: ; %bb0 -; GCN-NEXT: s_add_pc_i64 .LBB2_2-.Lpost_addpc1 -; GCN-NEXT: .Lpost_addpc1: +; GCN-NEXT: s_add_pc_i64 .LBB2_2-.Lpost_addpc2 +; GCN-NEXT: .Lpost_addpc2: ; GCN-NEXT: .LBB2_1: ; %bb2 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: ; 32 bytes @@ -157,8 +160,8 @@ define amdgpu_kernel void @min_long_forward_vbranch(ptr addrspace(1) %arg) #0 { ; GCN-NEXT: v_cmpx_ne_u32_e32 0, v2 ; GCN-NEXT: s_cbranch_execnz .LBB3_1 ; GCN-NEXT: ; %bb.3: ; %bb -; GCN-NEXT: s_add_pc_i64 .LBB3_2-.Lpost_addpc2 -; GCN-NEXT: .Lpost_addpc2: +; GCN-NEXT: s_add_pc_i64 .LBB3_2-.Lpost_addpc3 +; GCN-NEXT: .Lpost_addpc3: ; GCN-NEXT: .LBB3_1: ; %bb2 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: ; 32 bytes @@ -209,8 +212,8 @@ define amdgpu_kernel void @long_backward_sbranch(ptr addrspace(1) %arg) #0 { ; GCN-NEXT: s_cbranch_scc0 .LBB4_2 ; GCN-NEXT: ; %bb.3: ; %bb2 ; GCN-NEXT: ; in Loop: Header=BB4_1 Depth=1 -; GCN-NEXT: s_add_pc_i64 .LBB4_1-.Lpost_addpc3 -; GCN-NEXT: .Lpost_addpc3: +; GCN-NEXT: s_add_pc_i64 .LBB4_1-.Lpost_addpc4 +; GCN-NEXT: .Lpost_addpc4: ; GCN-NEXT: .LBB4_2: ; %bb3 ; GCN-NEXT: s_endpgm bb: @@ -242,8 +245,8 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add ; GCN-NEXT: s_mov_b32 s0, -1 ; GCN-NEXT: s_cbranch_scc0 .LBB5_1 ; GCN-NEXT: ; %bb.7: ; %bb0 -; GCN-NEXT: s_add_pc_i64 .LBB5_4-.Lpost_addpc5 -; GCN-NEXT: .Lpost_addpc5: +; GCN-NEXT: s_add_pc_i64 .LBB5_4-.Lpost_addpc6 +; GCN-NEXT: .Lpost_addpc6: ; GCN-NEXT: .LBB5_1: ; %Flow ; GCN-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0 ; GCN-NEXT: s_cbranch_vccnz .LBB5_3 @@ -268,11 +271,11 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add ; GCN-NEXT: s_sleep 0 ; GCN-NEXT: s_cbranch_execnz .LBB5_5 ; GCN-NEXT: ; %bb.9: ; %bb3 -; GCN-NEXT: s_add_pc_i64 .LBB5_2-.Lpost_addpc6 -; GCN-NEXT: .Lpost_addpc6: +; GCN-NEXT: s_add_pc_i64 .LBB5_2-.Lpost_addpc7 +; GCN-NEXT: .Lpost_addpc7: ; GCN-NEXT: .LBB5_5: ; %bb3 -; GCN-NEXT: s_add_pc_i64 .LBB5_3-.Lpost_addpc4 -; GCN-NEXT: .Lpost_addpc4: +; GCN-NEXT: s_add_pc_i64 .LBB5_3-.Lpost_addpc5 +; GCN-NEXT: .Lpost_addpc5: bb0: %tmp = icmp ne i32 %arg1, 0 br i1 %tmp, label %bb2, label %bb3 @@ -310,8 +313,8 @@ define amdgpu_kernel void @uniform_unconditional_min_long_backward_branch(ptr ad ; GCN-NEXT: s_cbranch_vccz .LBB6_2 ; GCN-NEXT: ; %bb.3: ; %loop ; GCN-NEXT: ; in Loop: Header=BB6_1 Depth=1 -; GCN-NEXT: s_add_pc_i64 .LBB6_1-.Lpost_addpc7 -; GCN-NEXT: .Lpost_addpc7: +; GCN-NEXT: s_add_pc_i64 .LBB6_1-.Lpost_addpc8 +; GCN-NEXT: .Lpost_addpc8: ; GCN-NEXT: .LBB6_2: ; %DummyReturnBlock ; GCN-NEXT: s_endpgm entry: @@ -350,8 +353,8 @@ define amdgpu_kernel void @expand_requires_expand(i32 %cond0) #0 { ; GCN-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0 ; GCN-NEXT: s_cbranch_vccz .LBB7_3 ; GCN-NEXT: ; %bb.5: ; %Flow -; GCN-NEXT: s_add_pc_i64 .LBB7_4-.Lpost_addpc8 -; GCN-NEXT: .Lpost_addpc8: +; GCN-NEXT: s_add_pc_i64 .LBB7_4-.Lpost_addpc9 +; GCN-NEXT: .Lpost_addpc9: ; GCN-NEXT: .LBB7_3: ; %bb2 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: v_nop_e64 @@ -406,8 +409,8 @@ define amdgpu_kernel void @uniform_inside_divergent(ptr addrspace(1) %out, i32 % ; GCN-NEXT: v_cmpx_gt_u32_e32 16, v0 ; GCN-NEXT: 
s_cbranch_execnz .LBB8_1 ; GCN-NEXT: ; %bb.4: ; %entry -; GCN-NEXT: s_add_pc_i64 .LBB8_3-.Lpost_addpc9 -; GCN-NEXT: .Lpost_addpc9: +; GCN-NEXT: s_add_pc_i64 .LBB8_3-.Lpost_addpc10 +; GCN-NEXT: .Lpost_addpc10: ; GCN-NEXT: .LBB8_1: ; %if ; GCN-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 ; GCN-NEXT: v_mov_b32_e32 v0, 0 @@ -465,8 +468,8 @@ define amdgpu_kernel void @analyze_mask_branch() #0 { ; GCN-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GCN-NEXT: s_cbranch_execnz .LBB9_3 ; GCN-NEXT: ; %bb.6: ; %Flow1 -; GCN-NEXT: s_add_pc_i64 .LBB9_5-.Lpost_addpc10 -; GCN-NEXT: .Lpost_addpc10: +; GCN-NEXT: s_add_pc_i64 .LBB9_5-.Lpost_addpc11 +; GCN-NEXT: .Lpost_addpc11: ; GCN-NEXT: .LBB9_3: ; %loop.preheader ; GCN-NEXT: s_mov_b32 vcc_lo, 0 ; GCN-NEXT: .LBB9_4: ; %loop @@ -484,8 +487,8 @@ define amdgpu_kernel void @analyze_mask_branch() #0 { ; GCN-NEXT: s_cbranch_vccnz .LBB9_5 ; GCN-NEXT: ; %bb.8: ; %loop ; GCN-NEXT: ; in Loop: Header=BB9_4 Depth=1 -; GCN-NEXT: s_add_pc_i64 .LBB9_4-.Lpost_addpc11 -; GCN-NEXT: .Lpost_addpc11: +; GCN-NEXT: s_add_pc_i64 .LBB9_4-.Lpost_addpc12 +; GCN-NEXT: .Lpost_addpc12: ; GCN-NEXT: .LBB9_5: ; %UnifiedReturnBlock ; GCN-NEXT: s_endpgm entry: @@ -528,20 +531,20 @@ define amdgpu_kernel void @long_branch_hang(ptr addrspace(1) nocapture %arg, i32 ; GCN-NEXT: s_cmp_lt_i32 s3, 6 ; GCN-NEXT: s_cbranch_scc0 .LBB10_1 ; GCN-NEXT: ; %bb.10: ; %bb -; GCN-NEXT: s_add_pc_i64 .LBB10_4-.Lpost_addpc13 -; GCN-NEXT: .Lpost_addpc13: +; GCN-NEXT: s_add_pc_i64 .LBB10_4-.Lpost_addpc14 +; GCN-NEXT: .Lpost_addpc14: ; GCN-NEXT: .LBB10_1: ; %Flow ; GCN-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7 ; GCN-NEXT: s_cbranch_vccnz .LBB10_2 ; GCN-NEXT: ; %bb.12: ; %Flow -; GCN-NEXT: s_add_pc_i64 .LBB10_5-.Lpost_addpc14 -; GCN-NEXT: .Lpost_addpc14: +; GCN-NEXT: s_add_pc_i64 .LBB10_5-.Lpost_addpc15 +; GCN-NEXT: .Lpost_addpc15: ; GCN-NEXT: .LBB10_2: ; %Flow5 ; GCN-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0 ; GCN-NEXT: s_cbranch_vccz .LBB10_3 ; GCN-NEXT: ; %bb.14: ; %Flow5 -; GCN-NEXT: s_add_pc_i64 .LBB10_6-.Lpost_addpc15 -; GCN-NEXT: .Lpost_addpc15: +; GCN-NEXT: s_add_pc_i64 .LBB10_6-.Lpost_addpc16 +; GCN-NEXT: .Lpost_addpc16: ; GCN-NEXT: .LBB10_3: ; %bb14 ; GCN-NEXT: s_cmp_lt_i32 s1, 9 ; GCN-NEXT: s_cselect_b32 s0, -1, 0 @@ -553,8 +556,8 @@ define amdgpu_kernel void @long_branch_hang(ptr addrspace(1) nocapture %arg, i32 ; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 ; GCN-NEXT: ; %bb.8: ; %bb14 -; GCN-NEXT: s_add_pc_i64 .LBB10_7-.Lpost_addpc12 -; GCN-NEXT: .Lpost_addpc12: +; GCN-NEXT: s_add_pc_i64 .LBB10_7-.Lpost_addpc13 +; GCN-NEXT: .Lpost_addpc13: ; GCN-NEXT: .LBB10_4: ; %bb13 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: v_nop_e64 @@ -565,8 +568,8 @@ define amdgpu_kernel void @long_branch_hang(ptr addrspace(1) nocapture %arg, i32 ; GCN-NEXT: s_sleep 0 ; GCN-NEXT: s_cbranch_execz .LBB10_5 ; GCN-NEXT: ; %bb.16: ; %bb13 -; GCN-NEXT: s_add_pc_i64 .LBB10_2-.Lpost_addpc16 -; GCN-NEXT: .Lpost_addpc16: +; GCN-NEXT: s_add_pc_i64 .LBB10_2-.Lpost_addpc17 +; GCN-NEXT: .Lpost_addpc17: ; GCN-NEXT: .LBB10_5: ; %bb9 ; GCN-NEXT: s_cmp_lt_i32 s3, 11 ; GCN-NEXT: s_cselect_b32 s0, -1, 0 @@ -577,8 +580,8 @@ define amdgpu_kernel void @long_branch_hang(ptr addrspace(1) nocapture %arg, i32 ; GCN-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0 ; GCN-NEXT: s_cbranch_vccnz .LBB10_6 ; GCN-NEXT: ; %bb.18: ; %bb9 -; GCN-NEXT: s_add_pc_i64 .LBB10_3-.Lpost_addpc17 -; GCN-NEXT: .Lpost_addpc17: +; GCN-NEXT: s_add_pc_i64 .LBB10_3-.Lpost_addpc18 +; GCN-NEXT: .Lpost_addpc18: ; GCN-NEXT: .LBB10_6: ; GCN-NEXT: ; implicit-def: $vgpr0 ; 
GCN-NEXT: .LBB10_7: ; %bb19 diff --git a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll index bdb52db..d1a1112 100644 --- a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll +++ b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll @@ -1,8 +1,33 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=GFX8 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=GFX8,GFX8-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=GFX8,GFX8-GISEL %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9-GISEL %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16,GFX11-FAKE16-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16,GFX11-FAKE16-GISEL %s +; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16,GFX11-TRUE16-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16,GFX11-TRUE16-GISEL %s define void @undef_lo_v2i16(i16 %arg0) { +; GFX8-SDAG-LABEL: undef_lo_v2i16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v0 +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_lo_v2i16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX8-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v0 +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_lo_v2i16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -12,20 +37,48 @@ define void @undef_lo_v2i16(i16 %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_lo_v2i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v0 -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-FAKE16-LABEL: undef_lo_v2i16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11-FAKE16-NEXT: ;;#ASMSTART +; GFX11-FAKE16-NEXT: ; use v0 +; GFX11-FAKE16-NEXT: ;;#ASMEND +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: undef_lo_v2i16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 
v0.h, v0.l +; GFX11-TRUE16-NEXT: ;;#ASMSTART +; GFX11-TRUE16-NEXT: ; use v0 +; GFX11-TRUE16-NEXT: ;;#ASMEND +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] %undef.lo = insertelement <2 x i16> poison, i16 %arg0, i32 1 call void asm sideeffect "; use $0", "v"(<2 x i16> %undef.lo); ret void } define void @undef_lo_v2f16(half %arg0) { +; GFX8-SDAG-LABEL: undef_lo_v2f16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v0 +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_lo_v2f16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX8-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v0 +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_lo_v2f16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -35,20 +88,52 @@ define void @undef_lo_v2f16(half %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_lo_v2f16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v0 -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-FAKE16-LABEL: undef_lo_v2f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11-FAKE16-NEXT: ;;#ASMSTART +; GFX11-FAKE16-NEXT: ; use v0 +; GFX11-FAKE16-NEXT: ;;#ASMEND +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: undef_lo_v2f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l +; GFX11-TRUE16-NEXT: ;;#ASMSTART +; GFX11-TRUE16-NEXT: ; use v0 +; GFX11-TRUE16-NEXT: ;;#ASMEND +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] %undef.lo = insertelement <2 x half> poison, half %arg0, i32 1 call void asm sideeffect "; use $0", "v"(<2 x half> %undef.lo); ret void } define void @undef_lo_op_v2f16(half %arg0) { +; GFX8-SDAG-LABEL: undef_lo_op_v2f16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_mov_b32_e32 v1, 0x3c00 +; GFX8-SDAG-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-SDAG-NEXT: v_or_b32_e32 v0, 0x7e00, v0 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v0 +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_lo_op_v2f16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_mov_b32_e32 v2, 0x3c00 +; GFX8-GISEL-NEXT: v_add_f16_e64 v1, s4, 1.0 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, v1, v0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v0 +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_lo_op_v2f16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -59,16 +144,27 @@ define void @undef_lo_op_v2f16(half %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_lo_op_v2f16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v1, 0x3c00 -; GFX8-NEXT: v_add_f16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, 0x7e00, v0 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v0 -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-FAKE16-LABEL: undef_lo_op_v2f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, v0, 1.0 op_sel_hi:[1,0] +; GFX11-FAKE16-NEXT: ;;#ASMSTART +; GFX11-FAKE16-NEXT: ; use v0 +; GFX11-FAKE16-NEXT: ;;#ASMEND +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: undef_lo_op_v2f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, v0, 1.0 op_sel_hi:[1,0] +; GFX11-TRUE16-NEXT: ;;#ASMSTART +; GFX11-TRUE16-NEXT: ; use v0 +; GFX11-TRUE16-NEXT: ;;#ASMEND +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] %undef.lo = insertelement <2 x half> poison, half %arg0, i32 1 %op = fadd <2 x half> %undef.lo, <half 1.0, half 1.0> call void asm sideeffect "; use $0", "v"(<2 x half> %op); @@ -76,26 +172,93 @@ define void @undef_lo_op_v2f16(half %arg0) { } define void @undef_lo_op_v2i16(i16 %arg0) { -; GFX9-LABEL: undef_lo_op_v2i16: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX9-NEXT: s_movk_i32 s4, 0x63 -; GFX9-NEXT: v_pk_add_u16 v0, v0, s4 op_sel_hi:[1,0] -; GFX9-NEXT: ;;#ASMSTART -; GFX9-NEXT: ; use v0 -; GFX9-NEXT: ;;#ASMEND -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX8-SDAG-LABEL: undef_lo_op_v2i16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_mov_b32_e32 v1, 0x63 +; GFX8-SDAG-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v0 +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_lo_op_v2i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v1, 0x63 -; GFX8-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v0 -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX8-GISEL-LABEL: undef_lo_op_v2i16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0x63 +; GFX8-GISEL-NEXT: s_and_b32 s4, 0xffff, s4 +; GFX8-GISEL-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, s4, v0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v0 +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-SDAG-LABEL: undef_lo_op_v2i16: +; GFX9-SDAG: ; %bb.0: +; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-SDAG-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x63 +; GFX9-SDAG-NEXT: v_pk_add_u16 v0, v0, s4 op_sel_hi:[1,0] +; GFX9-SDAG-NEXT: ;;#ASMSTART +; GFX9-SDAG-NEXT: ; use v0 +; GFX9-SDAG-NEXT: ;;#ASMEND +; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-GISEL-LABEL: undef_lo_op_v2i16: +; GFX9-GISEL: ; %bb.0: 
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0x630063 +; GFX9-GISEL-NEXT: v_pk_add_u16 v0, v0, v1 +; GFX9-GISEL-NEXT: ;;#ASMSTART +; GFX9-GISEL-NEXT: ; use v0 +; GFX9-GISEL-NEXT: ;;#ASMEND +; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-SDAG-LABEL: undef_lo_op_v2i16: +; GFX11-FAKE16-SDAG: ; %bb.0: +; GFX11-FAKE16-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-SDAG-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11-FAKE16-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-SDAG-NEXT: v_pk_add_u16 v0, 0x63, v0 op_sel_hi:[0,1] +; GFX11-FAKE16-SDAG-NEXT: ;;#ASMSTART +; GFX11-FAKE16-SDAG-NEXT: ; use v0 +; GFX11-FAKE16-SDAG-NEXT: ;;#ASMEND +; GFX11-FAKE16-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-GISEL-LABEL: undef_lo_op_v2i16: +; GFX11-FAKE16-GISEL: ; %bb.0: +; GFX11-FAKE16-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11-FAKE16-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-GISEL-NEXT: v_pk_add_u16 v0, 0x630063, v0 +; GFX11-FAKE16-GISEL-NEXT: ;;#ASMSTART +; GFX11-FAKE16-GISEL-NEXT: ; use v0 +; GFX11-FAKE16-GISEL-NEXT: ;;#ASMEND +; GFX11-FAKE16-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-SDAG-LABEL: undef_lo_op_v2i16: +; GFX11-TRUE16-SDAG: ; %bb.0: +; GFX11-TRUE16-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-SDAG-NEXT: v_mov_b16_e32 v0.h, v0.l +; GFX11-TRUE16-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-SDAG-NEXT: v_pk_add_u16 v0, 0x63, v0 op_sel_hi:[0,1] +; GFX11-TRUE16-SDAG-NEXT: ;;#ASMSTART +; GFX11-TRUE16-SDAG-NEXT: ; use v0 +; GFX11-TRUE16-SDAG-NEXT: ;;#ASMEND +; GFX11-TRUE16-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-GISEL-LABEL: undef_lo_op_v2i16: +; GFX11-TRUE16-GISEL: ; %bb.0: +; GFX11-TRUE16-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-GISEL-NEXT: v_mov_b16_e32 v0.h, v0.l +; GFX11-TRUE16-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-GISEL-NEXT: v_pk_add_u16 v0, 0x630063, v0 +; GFX11-TRUE16-GISEL-NEXT: ;;#ASMSTART +; GFX11-TRUE16-GISEL-NEXT: ; use v0 +; GFX11-TRUE16-GISEL-NEXT: ;;#ASMEND +; GFX11-TRUE16-GISEL-NEXT: s_setpc_b64 s[30:31] %undef.lo = insertelement <2 x i16> poison, i16 %arg0, i32 1 %op = add <2 x i16> %undef.lo, <i16 99, i16 99> call void asm sideeffect "; use $0", "v"(<2 x i16> %op); @@ -103,6 +266,26 @@ define void @undef_lo_op_v2i16(i16 %arg0) { } define void @undef_lo3_v4i16(i16 %arg0) { +; GFX8-SDAG-LABEL: undef_lo3_v4i16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v[0:1] +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_lo3_v4i16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX8-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v[0:1] +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_lo3_v4i16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -112,20 +295,49 @@ define void @undef_lo3_v4i16(i16 %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_lo3_v4i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v[0:1] -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-FAKE16-LABEL: undef_lo3_v4i16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11-FAKE16-NEXT: ;;#ASMSTART +; GFX11-FAKE16-NEXT: ; use v[0:1] +; GFX11-FAKE16-NEXT: ;;#ASMEND +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: undef_lo3_v4i16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l +; GFX11-TRUE16-NEXT: ;;#ASMSTART +; GFX11-TRUE16-NEXT: ; use v[0:1] +; GFX11-TRUE16-NEXT: ;;#ASMEND +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] %undef.lo = insertelement <4 x i16> poison, i16 %arg0, i32 1 call void asm sideeffect "; use $0", "v"(<4 x i16> %undef.lo); ret void } define void @undef_lo3_v4f16(half %arg0) { +; GFX8-SDAG-LABEL: undef_lo3_v4f16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v[0:1] +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_lo3_v4f16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX8-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v[0:1] +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_lo3_v4f16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -135,20 +347,50 @@ define void @undef_lo3_v4f16(half %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_lo3_v4f16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v[0:1] -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-FAKE16-LABEL: undef_lo3_v4f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11-FAKE16-NEXT: ;;#ASMSTART +; GFX11-FAKE16-NEXT: ; use v[0:1] +; GFX11-FAKE16-NEXT: ;;#ASMEND +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: undef_lo3_v4f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l +; GFX11-TRUE16-NEXT: ;;#ASMSTART +; GFX11-TRUE16-NEXT: ; use v[0:1] +; GFX11-TRUE16-NEXT: ;;#ASMEND +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] %undef.lo = insertelement <4 x half> poison, half %arg0, i32 1 call void asm sideeffect "; use $0", "v"(<4 x half> %undef.lo); ret void } define void @undef_lo2_v4i16(<2 x i16> %arg0) { +; GFX8-SDAG-LABEL: undef_lo2_v4i16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX8-SDAG-NEXT: v_alignbit_b32 v0, v1, v0, 16 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v[0:1] +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_lo2_v4i16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, 
v0 +; GFX8-GISEL-NEXT: v_alignbit_b32 v0, v1, v0, 16 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v[0:1] +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_lo2_v4i16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -159,21 +401,62 @@ define void @undef_lo2_v4i16(<2 x i16> %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_lo2_v4i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0 -; GFX8-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v[0:1] -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-FAKE16-LABEL: undef_lo2_v4i16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v0, v0, 0x7060302 +; GFX11-FAKE16-NEXT: ;;#ASMSTART +; GFX11-FAKE16-NEXT: ; use v[0:1] +; GFX11-FAKE16-NEXT: ;;#ASMEND +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-SDAG-LABEL: undef_lo2_v4i16: +; GFX11-TRUE16-SDAG: ; %bb.0: +; GFX11-TRUE16-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-SDAG-NEXT: v_mov_b16_e32 v1.l, v0.h +; GFX11-TRUE16-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX11-TRUE16-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-SDAG-NEXT: v_lshl_or_b32 v0, v1, 16, v0 +; GFX11-TRUE16-SDAG-NEXT: ;;#ASMSTART +; GFX11-TRUE16-SDAG-NEXT: ; use v[0:1] +; GFX11-TRUE16-SDAG-NEXT: ;;#ASMEND +; GFX11-TRUE16-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-GISEL-LABEL: undef_lo2_v4i16: +; GFX11-TRUE16-GISEL: ; %bb.0: +; GFX11-TRUE16-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-GISEL-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11-TRUE16-GISEL-NEXT: ;;#ASMSTART +; GFX11-TRUE16-GISEL-NEXT: ; use v[0:1] +; GFX11-TRUE16-GISEL-NEXT: ;;#ASMEND +; GFX11-TRUE16-GISEL-NEXT: s_setpc_b64 s[30:31] %undef.lo = shufflevector <2 x i16> %arg0, <2 x i16> poison, <4 x i32> <i32 1, i32 1, i32 2, i32 3> call void asm sideeffect "; use $0", "v"(<4 x i16> %undef.lo); ret void } define void @undef_lo2_v4f16(<2 x half> %arg0) { +; GFX8-SDAG-LABEL: undef_lo2_v4f16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX8-SDAG-NEXT: v_alignbit_b32 v0, v1, v0, 16 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v[0:1] +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_lo2_v4f16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX8-GISEL-NEXT: v_alignbit_b32 v0, v1, v0, 16 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v[0:1] +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_lo2_v4f16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -184,21 +467,57 @@ define void @undef_lo2_v4f16(<2 x half> %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_lo2_v4f16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0 -; GFX8-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v[0:1] -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-FAKE16-LABEL: undef_lo2_v4f16: +; 
GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v0, v0, 0x7060302 +; GFX11-FAKE16-NEXT: ;;#ASMSTART +; GFX11-FAKE16-NEXT: ; use v[0:1] +; GFX11-FAKE16-NEXT: ;;#ASMEND +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-SDAG-LABEL: undef_lo2_v4f16: +; GFX11-TRUE16-SDAG: ; %bb.0: +; GFX11-TRUE16-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-SDAG-NEXT: v_mov_b16_e32 v1.l, v0.h +; GFX11-TRUE16-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-SDAG-NEXT: v_bfi_b32 v0, 0xffff, v1, v0 +; GFX11-TRUE16-SDAG-NEXT: ;;#ASMSTART +; GFX11-TRUE16-SDAG-NEXT: ; use v[0:1] +; GFX11-TRUE16-SDAG-NEXT: ;;#ASMEND +; GFX11-TRUE16-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-GISEL-LABEL: undef_lo2_v4f16: +; GFX11-TRUE16-GISEL: ; %bb.0: +; GFX11-TRUE16-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-GISEL-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11-TRUE16-GISEL-NEXT: ;;#ASMSTART +; GFX11-TRUE16-GISEL-NEXT: ; use v[0:1] +; GFX11-TRUE16-GISEL-NEXT: ;;#ASMEND +; GFX11-TRUE16-GISEL-NEXT: s_setpc_b64 s[30:31] %undef.lo = shufflevector <2 x half> %arg0, <2 x half> poison, <4 x i32> <i32 1, i32 1, i32 2, i32 3> call void asm sideeffect "; use $0", "v"(<4 x half> %undef.lo); ret void } define void @undef_hi_v2i16(i16 %arg0) { +; GFX8-SDAG-LABEL: undef_hi_v2i16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v0 +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_hi_v2i16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v0 +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_hi_v2i16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -207,19 +526,36 @@ define void @undef_hi_v2i16(i16 %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_hi_v2i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v0 -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: undef_hi_v2i16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: ;;#ASMSTART +; GFX11-NEXT: ; use v0 +; GFX11-NEXT: ;;#ASMEND +; GFX11-NEXT: s_setpc_b64 s[30:31] %undef.hi = insertelement <2 x i16> poison, i16 %arg0, i32 0 call void asm sideeffect "; use $0", "v"(<2 x i16> %undef.hi); ret void } define void @undef_hi_v2f16(half %arg0) { +; GFX8-SDAG-LABEL: undef_hi_v2f16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v0 +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_hi_v2f16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v0 +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_hi_v2f16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -228,19 +564,42 @@ define void @undef_hi_v2f16(half %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_hi_v2f16: -; GFX8: ; 
%bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v0 -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: undef_hi_v2f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: ;;#ASMSTART +; GFX11-NEXT: ; use v0 +; GFX11-NEXT: ;;#ASMEND +; GFX11-NEXT: s_setpc_b64 s[30:31] %undef.hi = insertelement <2 x half> poison, half %arg0, i32 0 call void asm sideeffect "; use $0", "v"(<2 x half> %undef.hi); ret void } define void @undef_hi_op_v2f16(half %arg0) { +; GFX8-SDAG-LABEL: undef_hi_op_v2f16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_add_f16_e32 v0, 1.0, v0 +; GFX8-SDAG-NEXT: v_or_b32_e32 v0, 0x7e000000, v0 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v0 +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_hi_op_v2f16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v2, 0x3c00 +; GFX8-GISEL-NEXT: v_add_f16_e32 v0, 1.0, v0 +; GFX8-GISEL-NEXT: v_add_f16_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v0 +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_hi_op_v2f16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -250,15 +609,14 @@ define void @undef_hi_op_v2f16(half %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_hi_op_v2f16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_f16_e32 v0, 1.0, v0 -; GFX8-NEXT: v_or_b32_e32 v0, 0x7e000000, v0 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v0 -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: undef_hi_op_v2f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_add_f16 v0, v0, 1.0 op_sel_hi:[1,0] +; GFX11-NEXT: ;;#ASMSTART +; GFX11-NEXT: ; use v0 +; GFX11-NEXT: ;;#ASMEND +; GFX11-NEXT: s_setpc_b64 s[30:31] %undef.hi = insertelement <2 x half> poison, half %arg0, i32 0 %op = fadd <2 x half> %undef.hi, <half 1.0, half 1.0> call void asm sideeffect "; use $0", "v"(<2 x half> %op); @@ -266,24 +624,82 @@ define void @undef_hi_op_v2f16(half %arg0) { } define void @undef_hi_op_v2i16(i16 %arg0) { -; GFX9-LABEL: undef_hi_op_v2i16: -; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NEXT: s_movk_i32 s4, 0x63 -; GFX9-NEXT: v_pk_add_u16 v0, v0, s4 op_sel_hi:[1,0] -; GFX9-NEXT: ;;#ASMSTART -; GFX9-NEXT: ; use v0 -; GFX9-NEXT: ;;#ASMEND -; GFX9-NEXT: s_setpc_b64 s[30:31] +; GFX8-SDAG-LABEL: undef_hi_op_v2i16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: v_add_u16_e32 v0, 0x63, v0 +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v0 +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_hi_op_v2i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u16_e32 v0, 0x63, v0 -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v0 -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX8-GISEL-LABEL: undef_hi_op_v2i16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
GFX8-GISEL-NEXT: s_and_b32 s4, 0xffff, s4 +; GFX8-GISEL-NEXT: v_add_u16_e32 v0, 0x63, v0 +; GFX8-GISEL-NEXT: s_lshl_b32 s4, s4, 16 +; GFX8-GISEL-NEXT: v_or_b32_e32 v0, s4, v0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v0 +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-SDAG-LABEL: undef_hi_op_v2i16: +; GFX9-SDAG: ; %bb.0: +; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x63 +; GFX9-SDAG-NEXT: v_pk_add_u16 v0, v0, s4 op_sel_hi:[1,0] +; GFX9-SDAG-NEXT: ;;#ASMSTART +; GFX9-SDAG-NEXT: ; use v0 +; GFX9-SDAG-NEXT: ;;#ASMEND +; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-GISEL-LABEL: undef_hi_op_v2i16: +; GFX9-GISEL: ; %bb.0: +; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0x630063 +; GFX9-GISEL-NEXT: v_pk_add_u16 v0, v0, v1 +; GFX9-GISEL-NEXT: ;;#ASMSTART +; GFX9-GISEL-NEXT: ; use v0 +; GFX9-GISEL-NEXT: ;;#ASMEND +; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-SDAG-LABEL: undef_hi_op_v2i16: +; GFX11-FAKE16-SDAG: ; %bb.0: +; GFX11-FAKE16-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-SDAG-NEXT: v_pk_add_u16 v0, 0x63, v0 op_sel_hi:[0,1] +; GFX11-FAKE16-SDAG-NEXT: ;;#ASMSTART +; GFX11-FAKE16-SDAG-NEXT: ; use v0 +; GFX11-FAKE16-SDAG-NEXT: ;;#ASMEND +; GFX11-FAKE16-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-GISEL-LABEL: undef_hi_op_v2i16: +; GFX11-FAKE16-GISEL: ; %bb.0: +; GFX11-FAKE16-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-GISEL-NEXT: v_pk_add_u16 v0, 0x630063, v0 +; GFX11-FAKE16-GISEL-NEXT: ;;#ASMSTART +; GFX11-FAKE16-GISEL-NEXT: ; use v0 +; GFX11-FAKE16-GISEL-NEXT: ;;#ASMEND +; GFX11-FAKE16-GISEL-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-SDAG-LABEL: undef_hi_op_v2i16: +; GFX11-TRUE16-SDAG: ; %bb.0: +; GFX11-TRUE16-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-SDAG-NEXT: v_pk_add_u16 v0, 0x63, v0 op_sel_hi:[0,1] +; GFX11-TRUE16-SDAG-NEXT: ;;#ASMSTART +; GFX11-TRUE16-SDAG-NEXT: ; use v0 +; GFX11-TRUE16-SDAG-NEXT: ;;#ASMEND +; GFX11-TRUE16-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-GISEL-LABEL: undef_hi_op_v2i16: +; GFX11-TRUE16-GISEL: ; %bb.0: +; GFX11-TRUE16-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-GISEL-NEXT: v_pk_add_u16 v0, 0x630063, v0 +; GFX11-TRUE16-GISEL-NEXT: ;;#ASMSTART +; GFX11-TRUE16-GISEL-NEXT: ; use v0 +; GFX11-TRUE16-GISEL-NEXT: ;;#ASMEND +; GFX11-TRUE16-GISEL-NEXT: s_setpc_b64 s[30:31] %undef.hi = insertelement <2 x i16> poison, i16 %arg0, i32 0 %op = add <2 x i16> %undef.hi, <i16 99, i16 99> call void asm sideeffect "; use $0", "v"(<2 x i16> %op); @@ -291,6 +707,24 @@ define void @undef_hi_op_v2i16(i16 %arg0) { } define void @undef_hi3_v4i16(i16 %arg0) { +; GFX8-SDAG-LABEL: undef_hi3_v4i16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v[0:1] +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_hi3_v4i16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v[0:1] +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_hi3_v4i16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -299,19 +733,37 @@ define void @undef_hi3_v4i16(i16 %arg0) { ; 
GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_hi3_v4i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v[0:1] -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: undef_hi3_v4i16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: ;;#ASMSTART +; GFX11-NEXT: ; use v[0:1] +; GFX11-NEXT: ;;#ASMEND +; GFX11-NEXT: s_setpc_b64 s[30:31] %undef.hi = insertelement <4 x i16> poison, i16 %arg0, i32 0 call void asm sideeffect "; use $0", "v"(<4 x i16> %undef.hi); ret void } define void @undef_hi3_v4f16(half %arg0) { +; GFX8-SDAG-LABEL: undef_hi3_v4f16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v[0:1] +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_hi3_v4f16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v[0:1] +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_hi3_v4f16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -320,19 +772,39 @@ define void @undef_hi3_v4f16(half %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_hi3_v4f16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v[0:1] -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: undef_hi3_v4f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: ;;#ASMSTART +; GFX11-NEXT: ; use v[0:1] +; GFX11-NEXT: ;;#ASMEND +; GFX11-NEXT: s_setpc_b64 s[30:31] %undef.hi = insertelement <4 x half> poison, half %arg0, i32 0 call void asm sideeffect "; use $0", "v"(<4 x half> %undef.hi); ret void } define void @undef_hi2_v4i16(<2 x i16> %arg0) { +; GFX8-SDAG-LABEL: undef_hi2_v4i16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v[0:1] +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_hi2_v4i16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX8-GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX8-GISEL-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v[0:1] +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_hi2_v4i16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -341,19 +813,39 @@ define void @undef_hi2_v4i16(<2 x i16> %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_hi2_v4i16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v[0:1] -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: undef_hi2_v4i16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: ;;#ASMSTART +; GFX11-NEXT: ; use v[0:1] +; GFX11-NEXT: ;;#ASMEND +; GFX11-NEXT: s_setpc_b64 
s[30:31] %undef.hi = shufflevector <2 x i16> %arg0, <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> call void asm sideeffect "; use $0", "v"(<4 x i16> %undef.hi); ret void } define void @undef_hi2_v4f16(<2 x half> %arg0) { +; GFX8-SDAG-LABEL: undef_hi2_v4f16: +; GFX8-SDAG: ; %bb.0: +; GFX8-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-SDAG-NEXT: ;;#ASMSTART +; GFX8-SDAG-NEXT: ; use v[0:1] +; GFX8-SDAG-NEXT: ;;#ASMEND +; GFX8-SDAG-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-GISEL-LABEL: undef_hi2_v4f16: +; GFX8-GISEL: ; %bb.0: +; GFX8-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX8-GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX8-GISEL-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX8-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX8-GISEL-NEXT: ;;#ASMSTART +; GFX8-GISEL-NEXT: ; use v[0:1] +; GFX8-GISEL-NEXT: ;;#ASMEND +; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31] +; ; GFX9-LABEL: undef_hi2_v4f16: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -362,15 +854,16 @@ define void @undef_hi2_v4f16(<2 x half> %arg0) { ; GFX9-NEXT: ;;#ASMEND ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX8-LABEL: undef_hi2_v4f16: -; GFX8: ; %bb.0: -; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: ;;#ASMSTART -; GFX8-NEXT: ; use v[0:1] -; GFX8-NEXT: ;;#ASMEND -; GFX8-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: undef_hi2_v4f16: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: ;;#ASMSTART +; GFX11-NEXT: ; use v[0:1] +; GFX11-NEXT: ;;#ASMEND +; GFX11-NEXT: s_setpc_b64 s[30:31] %undef.hi = shufflevector <2 x half> %arg0, <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> call void asm sideeffect "; use $0", "v"(<4 x half> %undef.hi); ret void } - +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; GFX8: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/ctpop16.ll b/llvm/test/CodeGen/AMDGPU/ctpop16.ll index 1b9b508..cefcbdd 100644 --- a/llvm/test/CodeGen/AMDGPU/ctpop16.ll +++ b/llvm/test/CodeGen/AMDGPU/ctpop16.ll @@ -457,27 +457,58 @@ define amdgpu_kernel void @v_ctpop_v4i16(ptr addrspace(1) noalias %out, ptr addr ; ; EG-LABEL: v_ctpop_v4i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 3, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 7, @11, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T6.X, 1 +; EG-NEXT: ALU 37, @12, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XY, T0.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1 +; EG-NEXT: VTX_READ_64 T8.XY, T0.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: LSHL * T0.W, T0.X, literal.x, +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: LSHL * T0.W, T0.X, literal.x, BS:VEC_120/SCL_212 ; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00) ; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W, -; EG-NEXT: ALU clause starting at 11: -; EG-NEXT: LSHR * T0.W, T0.X, literal.x, +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: AND_INT * T0.W, T8.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: LSHR * T0.W, T8.X, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T0.Y, PV.W, -; EG-NEXT: AND_INT * T0.W, T0.X, literal.x, +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T0.X, PV.W, -; EG-NEXT: LSHR * T6.X, KC0[2].Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV * T0.X, T5.X, +; EG-NEXT: AND_INT * T0.W, T8.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, T0.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: LSHR * T0.W, T8.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: LSHR T0.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T8.Y, T1.W, PV.W, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.Y, +; EG-NEXT: MOV * T8.X, T4.X, %tid = call i32 @llvm.amdgcn.workitem.id.x() %in.gep = getelementptr <4 x i16>, ptr addrspace(1) %in, i32 %tid %val = load <4 x i16>, ptr addrspace(1) %in.gep, align 16 @@ -570,33 +601,94 @@ define amdgpu_kernel void @v_ctpop_v8i16(ptr addrspace(1) noalias %out, ptr addr ; ; EG-LABEL: v_ctpop_v8i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 3, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 13, @11, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T8.X, 1 +; EG-NEXT: ALU 73, @12, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T12.X, 1 ; EG-NEXT: CF_END ; 
EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1 +; EG-NEXT: VTX_READ_128 T12.XYZW, T0.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: LSHL * T0.W, T0.X, literal.x, +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: LSHL * T0.W, T0.X, literal.x, BS:VEC_120/SCL_212 ; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00) ; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W, -; EG-NEXT: ALU clause starting at 11: -; EG-NEXT: LSHR * T0.W, T0.Z, literal.x, +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: LSHR * T0.W, T12.X, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT * T0.W, PV.W, +; EG-NEXT: LSHL T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T0.W, T12.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV * T0.X, T5.X, +; EG-NEXT: LSHR * T0.W, T12.Y, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) ; EG-NEXT: BCNT_INT T0.W, PV.W, -; EG-NEXT: AND_INT * T1.W, T0.Z, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.X, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T0.Z, PS, -; EG-NEXT: LSHR * T1.W, T0.X, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T0.W, T12.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.Y, PS, PV.W, +; EG-NEXT: MOV T5.X, PV.Y, +; EG-NEXT: MOV * T0.X, T8.X, +; EG-NEXT: LSHR * T0.W, T12.Z, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T0.Y, PV.W, +; EG-NEXT: BCNT_INT T0.W, PV.W, ; EG-NEXT: AND_INT * T1.W, T0.X, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T0.X, PV.W, -; EG-NEXT: LSHR * T8.X, KC0[2].Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T0.W, T12.Z, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV * T0.X, T9.X, +; EG-NEXT: LSHR * T0.W, T12.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, T0.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T0.W, T12.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: LSHR T12.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T0.W, PS, PV.W, ; EG-NEXT: 2(2.802597e-45), 
0(0.000000e+00) +; EG-NEXT: MOV T9.X, PV.W, +; EG-NEXT: MOV * T0.X, T4.X, +; EG-NEXT: MOV * T0.Z, T8.X, %tid = call i32 @llvm.amdgcn.workitem.id.x() %in.gep = getelementptr <8 x i16>, ptr addrspace(1) %in, i32 %tid %val = load <8 x i16>, ptr addrspace(1) %in.gep, align 32 @@ -745,46 +837,174 @@ define amdgpu_kernel void @v_ctpop_v16i16(ptr addrspace(1) noalias %out, ptr add ; ; EG-LABEL: v_ctpop_v16i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 2, @10, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 1 @6 -; EG-NEXT: ALU 25, @13, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T14.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T12.XYZW, T13.X, 1 +; EG-NEXT: ALU 3, @12, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 1 @8 +; EG-NEXT: ALU 114, @16, KC0[], KC1[] +; EG-NEXT: ALU 34, @131, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T22.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T20.XYZW, T21.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_128 T12.XYZW, T0.X, 16, #1 -; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1 -; EG-NEXT: ALU clause starting at 10: -; EG-NEXT: LSHL * T0.W, T0.X, literal.x, +; EG-NEXT: PAD +; EG-NEXT: Fetch clause starting at 8: +; EG-NEXT: VTX_READ_128 T20.XYZW, T0.X, 16, #1 +; EG-NEXT: VTX_READ_128 T21.XYZW, T0.X, 0, #1 +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: LSHL * T0.W, T0.X, literal.x, BS:VEC_120/SCL_212 ; EG-NEXT: 5(7.006492e-45), 0(0.000000e+00) ; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W, -; EG-NEXT: ALU clause starting at 13: -; EG-NEXT: LSHR * T0.W, T12.Z, literal.x, +; EG-NEXT: ALU clause starting at 16: +; EG-NEXT: LSHR * T0.W, T20.X, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T12.W, PV.W, -; EG-NEXT: AND_INT * T0.W, T12.Z, literal.x, +; EG-NEXT: BCNT_INT * T0.W, PV.W, +; EG-NEXT: LSHL T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T0.W, T20.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV * T0.X, T5.X, +; EG-NEXT: LSHR * T0.W, T20.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, T0.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T0.W, T20.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.Y, PS, PV.W, +; EG-NEXT: MOV T5.X, PV.Y, +; EG-NEXT: MOV * T0.X, T8.X, +; EG-NEXT: LSHR * T0.W, T20.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, T0.X, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T12.Z, PS, -; EG-NEXT: LSHR T0.W, T0.Z, literal.x, -; EG-NEXT: LSHR * T1.W, T12.X, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; 
EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T0.W, T20.Z, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV * T0.X, T9.X, +; EG-NEXT: LSHR * T0.W, T20.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T0.W, PV.W, +; EG-NEXT: AND_INT * T1.W, T0.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T12.Y, PS, -; EG-NEXT: AND_INT T0.Z, T0.Z, literal.x, +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T0.W, T20.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) ; EG-NEXT: BCNT_INT T0.W, PV.W, -; EG-NEXT: AND_INT * T1.W, T12.X, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV T9.X, PV.W, +; EG-NEXT: MOV * T0.X, T12.X, +; EG-NEXT: LSHR * T1.W, T21.X, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T1.W, PV.W, +; EG-NEXT: AND_INT * T2.W, T0.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T1.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, T2.W, PV.W, +; EG-NEXT: MOV * T12.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T1.W, T21.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T1.W, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV * T0.X, T13.X, +; EG-NEXT: LSHR * T1.W, T21.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T1.W, PV.W, +; EG-NEXT: AND_INT * T2.W, T0.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T1.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, T2.W, PV.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T1.W, T21.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T1.W, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T20.Y, PS, PV.W, +; EG-NEXT: MOV T13.X, PV.Y, +; EG-NEXT: MOV * T0.X, T16.X, +; EG-NEXT: LSHR * T1.W, T21.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T1.W, PV.W, +; EG-NEXT: AND_INT * T2.W, T0.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T1.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, T2.W, PV.W, +; EG-NEXT: ALU clause starting at 131: +; EG-NEXT: MOV * T16.X, T1.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT * T1.W, T21.Z, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T1.W, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV * T0.X, T17.X, +; EG-NEXT: LSHR * T1.W, T21.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BCNT_INT T1.W, PV.W, +; EG-NEXT: AND_INT * T2.W, T0.X, literal.x, ; EG-NEXT: 
65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: BCNT_INT T12.X, PS, -; EG-NEXT: BCNT_INT T0.Z, PV.Z, -; EG-NEXT: LSHR T1.W, T0.X, literal.x, -; EG-NEXT: ADD_INT * T2.W, KC0[2].Y, literal.x, +; EG-NEXT: LSHL * T1.W, PV.W, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T13.X, PS, literal.x, -; EG-NEXT: BCNT_INT T0.Y, PV.W, -; EG-NEXT: AND_INT * T1.W, T0.X, literal.y, -; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) -; EG-NEXT: BCNT_INT T0.X, PV.W, -; EG-NEXT: LSHR * T14.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T1.W, T2.W, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: AND_INT T1.W, T21.W, literal.x, +; EG-NEXT: LSHR * T21.X, KC0[2].Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), 2(2.802597e-45) +; EG-NEXT: AND_INT T0.Z, PV.X, literal.x, +; EG-NEXT: BCNT_INT T1.W, PV.W, +; EG-NEXT: ADD_INT * T2.W, KC0[2].Y, literal.y, +; EG-NEXT: -65536(nan), 16(2.242078e-44) +; EG-NEXT: LSHR T22.X, PS, literal.x, +; EG-NEXT: OR_INT * T20.W, PV.Z, PV.W, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T17.X, PV.W, +; EG-NEXT: MOV * T0.X, T4.X, +; EG-NEXT: MOV * T0.Z, T8.X, +; EG-NEXT: MOV T20.X, T12.X, +; EG-NEXT: MOV * T20.Z, T16.X, BS:VEC_120/SCL_212 %tid = call i32 @llvm.amdgcn.workitem.id.x() %in.gep = getelementptr <16 x i16>, ptr addrspace(1) %in, i32 %tid %val = load <16 x i16>, ptr addrspace(1) %in.gep, align 32 @@ -1292,7 +1512,7 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace( ; SI-NEXT: buffer_store_short v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm ; SI-NEXT: .LBB14_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 ; SI-NEXT: s_branch .LBB14_2 ; ; VI-LABEL: ctpop_i16_in_br: diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll index e6c38d2..747affa 100644 --- a/llvm/test/CodeGen/AMDGPU/div_i128.ll +++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll @@ -495,8 +495,9 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6 ; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v6, 1 ; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9] -; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1 -; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5] +; GFX9-O0-NEXT: s_mov_b64 s[14:15], -1 +; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[4:5], s[14:15] ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5 ; GFX9-O0-NEXT: s_mov_b32 s14, s13 ; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14 @@ -2697,19 +2698,16 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6 ; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[4:5], s[6:7] -; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[8:9] ; GFX9-O0-NEXT: v_mov_b32_e32 v4, s11 -; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v1, v4, s[12:13] -; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[8:9] +; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v1, v4, s[8:9] ; GFX9-O0-NEXT: v_mov_b32_e32 v1, s10 -; GFX9-O0-NEXT: v_cndmask_b32_e64 v0, v0, v1, s[12:13] +; GFX9-O0-NEXT: v_cndmask_b32_e64 v0, v0, v1, s[8:9] ; GFX9-O0-NEXT: ; implicit-def: $sgpr12 ; GFX9-O0-NEXT: ; implicit-def: $sgpr12 ; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4 -; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[8:9] ; GFX9-O0-NEXT: v_mov_b32_e32 v4, s11 -; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v3, v4, s[12:13] +; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v3, v4, s[8:9] ; GFX9-O0-NEXT: v_mov_b32_e32 v3, s10 ; GFX9-O0-NEXT: 
v_cndmask_b32_e64 v2, v2, v3, s[8:9] ; GFX9-O0-NEXT: ; implicit-def: $sgpr8 diff --git a/llvm/test/CodeGen/AMDGPU/empty-text.ll b/llvm/test/CodeGen/AMDGPU/empty-text.ll new file mode 100644 index 0000000..8aa8600 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/empty-text.ll @@ -0,0 +1,9 @@ +; Test that there is no s_code_end padding if .text is otherwise empty. + +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 < %s | FileCheck %s --check-prefixes=GCN + +@globalVar = global i32 37 + +declare amdgpu_ps void @funcDecl() + +; GCN-NOT: .fill diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll index 7524750..5fb50d0 100644 --- a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll @@ -2608,9 +2608,7 @@ define <2 x half> @v_test_canonicalize_undef_reg_v2f16(half %val) #1 { ; GFX11-TRUE16-LABEL: v_test_canonicalize_undef_reg_v2f16: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.h, v0.l, v0.l ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: v_test_canonicalize_undef_reg_v2f16: diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll index 2ff66c9..7d36c9f 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll @@ -252,13 +252,15 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB10_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -277,9 +279,11 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB10_2 ; GFX1250-SDAG-NEXT: .LBB10_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -292,15 +296,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr 
inreg %sbase, i32 %vof ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB10_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -314,13 +319,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB10_2 ; GFX1250-GISEL-NEXT: .LBB10_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -344,11 +352,13 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB11_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ 
-367,8 +377,11 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB11_2 ; GFX1250-SDAG-NEXT: .LBB11_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -381,18 +394,19 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB11_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -406,13 +420,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB11_2 ; GFX1250-GISEL-NEXT: .LBB11_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -433,11 +450,13 @@ define amdgpu_ps 
void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB12_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -455,9 +474,11 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB12_2 ; GFX1250-SDAG-NEXT: .LBB12_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm ; @@ -465,13 +486,14 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB12_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -483,14 +505,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB12_2 ; GFX1250-GISEL-NEXT: .LBB12_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo +; 
GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 @@ -508,10 +533,12 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB13_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -529,8 +556,11 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB13_2 ; GFX1250-SDAG-NEXT: .LBB13_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm ; @@ -538,16 +568,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB13_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -559,14 +590,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: 
$vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB13_2 ; GFX1250-GISEL-NEXT: .LBB13_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 @@ -642,13 +676,15 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_add_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB18_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -667,9 +703,11 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB18_2 ; GFX1250-SDAG-NEXT: .LBB18_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[2:3] @@ -683,15 +721,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; 
GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB18_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -705,13 +744,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB18_2 ; GFX1250-GISEL-NEXT: .LBB18_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[4:5] @@ -736,11 +778,13 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB19_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -759,8 +803,11 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB19_2 ; GFX1250-SDAG-NEXT: .LBB19_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[2:3] @@ -774,18 +821,19 @@ define amdgpu_ps <2 x 
float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB19_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -799,13 +847,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB19_2 ; GFX1250-GISEL-NEXT: .LBB19_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[4:5] @@ -827,11 +878,13 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, 
s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB20_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -849,9 +902,11 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB20_2 ; GFX1250-SDAG-NEXT: .LBB20_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3] @@ -862,13 +917,14 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB20_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -880,14 +936,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB20_2 ; GFX1250-GISEL-NEXT: .LBB20_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[4:5] @@ -908,10 +967,12 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi 
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB21_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -929,8 +990,11 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB21_2 ; GFX1250-SDAG-NEXT: .LBB21_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3] @@ -941,16 +1005,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB21_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -962,14 +1027,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB21_2 ; GFX1250-GISEL-NEXT: .LBB21_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 
v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[4:5] @@ -1048,13 +1116,15 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB26_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1073,9 +1143,11 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB26_2 ; GFX1250-SDAG-NEXT: .LBB26_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[2:3] @@ -1089,15 +1161,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB26_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1111,13 +1184,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: 
flat_atomic_sub_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB26_2 ; GFX1250-GISEL-NEXT: .LBB26_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[4:5] @@ -1142,11 +1218,13 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB27_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1165,8 +1243,11 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB27_2 ; GFX1250-SDAG-NEXT: .LBB27_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[2:3] @@ -1180,18 +1261,19 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; 
GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB27_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1205,13 +1287,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB27_2 ; GFX1250-GISEL-NEXT: .LBB27_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[4:5] @@ -1233,11 +1318,13 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB28_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1255,9 +1342,11 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB28_2 ; GFX1250-SDAG-NEXT: .LBB28_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: 
s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[2:3] @@ -1268,13 +1357,14 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB28_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1286,14 +1376,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB28_2 ; GFX1250-GISEL-NEXT: .LBB28_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[4:5] @@ -1314,10 +1407,12 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB29_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1335,8 +1430,11 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: 
s_cbranch_execz .LBB29_2 ; GFX1250-SDAG-NEXT: .LBB29_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[2:3] @@ -1347,16 +1445,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB29_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1368,14 +1467,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB29_2 ; GFX1250-GISEL-NEXT: .LBB29_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[4:5] @@ -1454,13 +1556,15 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_and_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB34_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1479,9 +1583,11 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB34_2 ; GFX1250-SDAG-NEXT: .LBB34_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3 @@ -1496,15 +1602,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB34_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1518,13 +1625,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB34_2 ; GFX1250-GISEL-NEXT: .LBB34_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, 
-1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4 @@ -1550,11 +1660,13 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB35_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1573,8 +1685,11 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB35_2 ; GFX1250-SDAG-NEXT: .LBB35_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3 @@ -1589,18 +1704,19 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; 
GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB35_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1614,13 +1730,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB35_2 ; GFX1250-GISEL-NEXT: .LBB35_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4 @@ -1643,11 +1762,13 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB36_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1665,9 +1786,11 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB36_2 ; GFX1250-SDAG-NEXT: .LBB36_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3 @@ -1679,13 +1802,14 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB36_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1697,14 +1821,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB36_2 ; GFX1250-GISEL-NEXT: .LBB36_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4 @@ -1726,10 +1853,12 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB37_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1747,8 +1876,11 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB37_2 ; GFX1250-SDAG-NEXT: .LBB37_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3 @@ -1760,16 +1892,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; 
GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB37_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1781,14 +1914,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB37_2 ; GFX1250-GISEL-NEXT: .LBB37_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4 @@ -1868,13 +2004,15 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-LABEL: flat_or_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB42_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1893,9 +2031,11 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB42_2 ; GFX1250-SDAG-NEXT: 
.LBB42_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3 @@ -1910,15 +2050,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB42_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1932,13 +2073,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB42_2 ; GFX1250-GISEL-NEXT: .LBB42_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4 @@ -1964,11 +2108,13 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; 
GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB43_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1987,8 +2133,11 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB43_2 ; GFX1250-SDAG-NEXT: .LBB43_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3 @@ -2003,18 +2152,19 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB43_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2028,13 +2178,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB43_2 ; 
GFX1250-GISEL-NEXT: .LBB43_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4 @@ -2057,11 +2210,13 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB44_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2079,9 +2234,11 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB44_2 ; GFX1250-SDAG-NEXT: .LBB44_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3 @@ -2093,13 +2250,14 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB44_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2111,14 +2269,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV 
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB44_2 ; GFX1250-GISEL-NEXT: .LBB44_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4 @@ -2140,10 +2301,12 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB45_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2161,8 +2324,11 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB45_2 ; GFX1250-SDAG-NEXT: .LBB45_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3 @@ -2174,16 +2340,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB45_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2195,14 +2362,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB45_2 ; GFX1250-GISEL-NEXT: .LBB45_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4 @@ -2282,13 +2452,15 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB50_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2307,9 +2479,11 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB50_2 ; GFX1250-SDAG-NEXT: .LBB50_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3 @@ -2324,15 +2498,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; 
GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB50_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2346,13 +2521,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB50_2 ; GFX1250-GISEL-NEXT: .LBB50_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4 @@ -2378,11 +2556,13 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB51_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2401,8 +2581,11 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, 
s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-SDAG-NEXT: .LBB51_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3 @@ -2417,18 +2600,19 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB51_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2442,13 +2626,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-GISEL-NEXT: .LBB51_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4 @@ -2471,11 +2658,13 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 
v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB52_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2493,9 +2682,11 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-SDAG-NEXT: .LBB52_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3 @@ -2507,13 +2698,14 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB52_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2525,14 +2717,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-GISEL-NEXT: .LBB52_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, 
vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4 @@ -2554,10 +2749,12 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB53_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2575,8 +2772,11 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-SDAG-NEXT: .LBB53_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3 @@ -2588,16 +2788,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB53_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2609,14 +2810,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; 
GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-GISEL-NEXT: .LBB53_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4 @@ -2690,13 +2894,15 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_max_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB58_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2715,10 +2921,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB58_2 ; GFX1250-SDAG-NEXT: .LBB58_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_i64 v[2:3], v[0:1], v[2:3] @@ -2732,15 +2940,16 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 
bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB58_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2753,15 +2962,18 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: .LBB58_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB58_2 ; GFX1250-GISEL-NEXT: .LBB58_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_i64 v[2:3], v[0:1], v[4:5] @@ -2786,11 +2998,13 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB59_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2809,9 +3023,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-SDAG-NEXT: .LBB59_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_i64 v[2:3], v[0:1], v[2:3] @@ -2825,18 +3042,19 @@ define 
amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB59_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2849,15 +3067,18 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: .LBB59_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-GISEL-NEXT: .LBB59_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_i64 v[2:3], v[0:1], v[4:5] @@ -2879,11 +3100,13 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: 
v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB60_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2900,9 +3123,11 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB60_2 ; GFX1250-SDAG-NEXT: .LBB60_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3] @@ -2913,13 +3138,14 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB60_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2930,14 +3156,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: .LBB60_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v0, v[4:5], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB60_2 ; GFX1250-GISEL-NEXT: .LBB60_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_i64 v[0:1], v[0:1], v[4:5] @@ -2958,10 +3187,12 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; 
GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB61_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2978,8 +3209,11 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB61_2 ; GFX1250-SDAG-NEXT: .LBB61_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3] @@ -2990,16 +3224,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB61_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3010,14 +3245,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: .LBB61_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v0, v[4:5], s[2:3] offset:-128 ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB61_2 ; GFX1250-GISEL-NEXT: .LBB61_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; 
GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_i64 v[0:1], v[0:1], v[4:5] @@ -3090,13 +3328,15 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_min_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB66_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3115,10 +3355,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB66_2 ; GFX1250-SDAG-NEXT: .LBB66_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_i64 v[2:3], v[0:1], v[2:3] @@ -3132,15 +3374,16 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB66_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3153,15 +3396,18 @@ define amdgpu_ps <2 x float> 
@flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: .LBB66_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB66_2 ; GFX1250-GISEL-NEXT: .LBB66_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_i64 v[2:3], v[0:1], v[4:5] @@ -3186,11 +3432,13 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB67_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3209,9 +3457,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB67_2 ; GFX1250-SDAG-NEXT: .LBB67_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_i64 v[2:3], v[0:1], v[2:3] @@ -3225,18 +3476,19 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB67_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3249,15 +3501,18 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: .LBB67_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB67_2 ; GFX1250-GISEL-NEXT: .LBB67_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_i64 v[2:3], v[0:1], v[4:5] @@ -3279,11 +3534,13 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB68_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3300,9 +3557,11 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB68_2 ; GFX1250-SDAG-NEXT: .LBB68_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; 
GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3] @@ -3313,13 +3572,14 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB68_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3330,14 +3590,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: .LBB68_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v0, v[4:5], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB68_2 ; GFX1250-GISEL-NEXT: .LBB68_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_i64 v[0:1], v[0:1], v[4:5] @@ -3358,10 +3621,12 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB69_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3378,8 +3643,11 @@ define 
amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB69_2 ; GFX1250-SDAG-NEXT: .LBB69_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3] @@ -3390,16 +3658,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB69_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3410,14 +3679,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: .LBB69_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v0, v[4:5], s[2:3] offset:-128 ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB69_2 ; GFX1250-GISEL-NEXT: .LBB69_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_i64 v[0:1], v[0:1], v[4:5] @@ -3490,13 +3762,15 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 
src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB74_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3515,10 +3789,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB74_2 ; GFX1250-SDAG-NEXT: .LBB74_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_u64 v[2:3], v[0:1], v[2:3] @@ -3532,15 +3808,16 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB74_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3553,15 +3830,18 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: .LBB74_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB74_2 ; GFX1250-GISEL-NEXT: .LBB74_4: 
; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_u64 v[2:3], v[0:1], v[4:5] @@ -3586,11 +3866,13 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB75_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3609,9 +3891,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB75_2 ; GFX1250-SDAG-NEXT: .LBB75_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_u64 v[2:3], v[0:1], v[2:3] @@ -3625,18 +3910,19 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: 
v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB75_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3649,15 +3935,18 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: .LBB75_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB75_2 ; GFX1250-GISEL-NEXT: .LBB75_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_u64 v[2:3], v[0:1], v[4:5] @@ -3679,11 +3968,13 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB76_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3700,9 +3991,11 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB76_2 ; GFX1250-SDAG-NEXT: .LBB76_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3] @@ -3713,13 +4006,14 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; 
GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB76_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3730,14 +4024,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: .LBB76_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v0, v[4:5], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB76_2 ; GFX1250-GISEL-NEXT: .LBB76_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_u64 v[0:1], v[0:1], v[4:5] @@ -3758,10 +4055,12 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB77_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3778,8 +4077,11 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB77_2 ; GFX1250-SDAG-NEXT: .LBB77_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: 
v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3] @@ -3790,16 +4092,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB77_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3810,14 +4113,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: .LBB77_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v0, v[4:5], s[2:3] offset:-128 ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB77_2 ; GFX1250-GISEL-NEXT: .LBB77_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_u64 v[0:1], v[0:1], v[4:5] @@ -3890,13 +4196,15 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: 
s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB82_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3915,10 +4223,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB82_2 ; GFX1250-SDAG-NEXT: .LBB82_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_u64 v[2:3], v[0:1], v[2:3] @@ -3932,15 +4242,16 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB82_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3953,15 +4264,18 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: .LBB82_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB82_2 ; GFX1250-GISEL-NEXT: .LBB82_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_u64 
v[2:3], v[0:1], v[4:5] @@ -3986,11 +4300,13 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB83_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4009,9 +4325,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB83_2 ; GFX1250-SDAG-NEXT: .LBB83_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_u64 v[2:3], v[0:1], v[2:3] @@ -4025,18 +4344,19 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB83_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4049,15 
+4369,18 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: .LBB83_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB83_2 ; GFX1250-GISEL-NEXT: .LBB83_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_u64 v[2:3], v[0:1], v[4:5] @@ -4079,11 +4402,13 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB84_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4100,9 +4425,11 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB84_2 ; GFX1250-SDAG-NEXT: .LBB84_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3] @@ -4113,13 +4440,14 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: 
s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB84_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4130,14 +4458,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: .LBB84_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v0, v[4:5], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB84_2 ; GFX1250-GISEL-NEXT: .LBB84_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_u64 v[0:1], v[0:1], v[4:5] @@ -4158,10 +4489,12 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB85_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4178,8 +4511,11 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB85_2 ; GFX1250-SDAG-NEXT: .LBB85_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3] @@ -4190,16 +4526,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; 
GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB85_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4210,14 +4547,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: .LBB85_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v0, v[4:5], s[2:3] offset:-128 ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB85_2 ; GFX1250-GISEL-NEXT: .LBB85_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_u64 v[0:1], v[0:1], v[4:5] @@ -4310,14 +4650,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB90_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4338,9 +4680,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB90_2 ; GFX1250-SDAG-NEXT: .LBB90_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; 
GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v2, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] @@ -4356,15 +4700,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v7, v4 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v0, v5 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, s0, v3 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB90_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4380,13 +4725,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v[0:1], v5, v[6:9], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB90_2 ; GFX1250-GISEL-NEXT: .LBB90_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] @@ -4414,11 +4762,13 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; 
GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB91_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4439,8 +4789,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB91_2 ; GFX1250-SDAG-NEXT: .LBB91_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v2, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] @@ -4456,18 +4809,19 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v7, v4 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v5 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, s0, v3 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB91_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4483,13 +4837,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v[0:1], v5, v[6:9], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9 ; GFX1250-GISEL-NEXT: 
s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB91_2 ; GFX1250-GISEL-NEXT: .LBB91_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] @@ -4512,13 +4869,15 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_nortn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4 -; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB92_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4538,9 +4897,11 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB92_2 ; GFX1250-SDAG-NEXT: .LBB92_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] @@ -4553,13 +4914,14 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: 
s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB92_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4573,14 +4935,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v0, v[6:9], s[2:3] scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB92_2 ; GFX1250-GISEL-NEXT: .LBB92_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] @@ -4603,10 +4968,12 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB93_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4626,8 +4993,11 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB93_2 ; GFX1250-SDAG-NEXT: .LBB93_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] @@ -4640,16 +5010,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; 
GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB93_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4663,14 +5034,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v0, v[6:9], s[2:3] offset:-128 scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB93_2 ; GFX1250-GISEL-NEXT: .LBB93_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] @@ -4742,13 +5116,15 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB98_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4766,15 +5142,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB98_2 ; GFX1250-SDAG-NEXT: .LBB98_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: 
s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1] ; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4 ; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -4786,15 +5163,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB98_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4806,21 +5184,24 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_branch .LBB98_5 ; GFX1250-GISEL-NEXT: .LBB98_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB98_2 ; GFX1250-GISEL-NEXT: .LBB98_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -4843,11 +5224,13 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB99_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4865,14 +5248,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB99_2 ; GFX1250-SDAG-NEXT: .LBB99_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1] ; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4 ; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -4884,18 +5269,19 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; 
GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB99_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4907,21 +5293,24 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_branch .LBB99_5 ; GFX1250-GISEL-NEXT: .LBB99_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB99_2 ; GFX1250-GISEL-NEXT: .LBB99_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -4941,11 +5330,13 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB100_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4961,14 +5352,15 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB100_2 ; GFX1250-SDAG-NEXT: .LBB100_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: 
v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1] ; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4 ; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm @@ -4977,13 +5369,14 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB100_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4993,20 +5386,23 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_endpgm ; GFX1250-GISEL-NEXT: .LBB100_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB100_2 ; GFX1250-GISEL-NEXT: .LBB100_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm @@ -5025,10 +5421,12 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; 
GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB101_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5044,13 +5442,15 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB101_2 ; GFX1250-SDAG-NEXT: .LBB101_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1] ; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4 ; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm @@ -5059,16 +5459,17 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB101_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5078,20 +5479,23 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_endpgm ; GFX1250-GISEL-NEXT: .LBB101_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; 
GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB101_2 ; GFX1250-GISEL-NEXT: .LBB101_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm @@ -5161,13 +5565,15 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB106_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5185,10 +5591,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB106_2 ; GFX1250-SDAG-NEXT: .LBB106_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5207,15 +5615,16 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; 
GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB106_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5227,15 +5636,18 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_branch .LBB106_5 ; GFX1250-GISEL-NEXT: .LBB106_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB106_2 ; GFX1250-GISEL-NEXT: .LBB106_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5265,11 +5677,13 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB107_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5287,9 +5701,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB107_2 
; GFX1250-SDAG-NEXT: .LBB107_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5308,18 +5725,19 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB107_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5331,15 +5749,18 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_branch .LBB107_5 ; GFX1250-GISEL-NEXT: .LBB107_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB107_2 ; GFX1250-GISEL-NEXT: .LBB107_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5366,11 +5787,13 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; 
%bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB108_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5386,9 +5809,11 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB108_2 ; GFX1250-SDAG-NEXT: .LBB108_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5404,13 +5829,14 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB108_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5420,14 +5846,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_endpgm ; GFX1250-GISEL-NEXT: .LBB108_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB108_2 ; GFX1250-GISEL-NEXT: .LBB108_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, 
s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5453,10 +5882,12 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB109_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5472,8 +5903,11 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB109_2 ; GFX1250-SDAG-NEXT: .LBB109_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5489,16 +5923,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB109_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5508,14 +5943,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_endpgm ; GFX1250-GISEL-NEXT: .LBB109_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; 
implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB109_2 ; GFX1250-GISEL-NEXT: .LBB109_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll index b25d9b2..fc88839 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll @@ -3621,7 +3621,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX9-NEXT: s_mov_b32 s0, 0 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: s_movk_i32 s0, 0x3004 +; GFX9-NEXT: s_movk_i32 s0, 0x3000 +; GFX9-NEXT: s_add_i32 s0, s0, 4 ; GFX9-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -3637,7 +3638,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; GFX10-NEXT: v_mov_b32_e32 v0, 13 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_movk_i32 s0, 0x3804 +; GFX10-NEXT: s_movk_i32 s0, 0x3800 +; GFX10-NEXT: s_add_i32 s0, s0, 4 ; GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3682,7 +3684,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX9-PAL-NEXT: s_addc_u32 flat_scratch_hi, s13, 0 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) -; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3004 +; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000 +; GFX9-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) @@ -3716,8 +3719,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX1010-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 ; GFX1010-PAL-NEXT: v_mov_b32_e32 v0, 13 ; GFX1010-PAL-NEXT: v_mov_b32_e32 v1, 15 +; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3800 ; GFX1010-PAL-NEXT: s_mov_b32 s1, 0 -; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3804 +; GFX1010-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX1010-PAL-NEXT: scratch_store_dword off, v0, s1 offset:4 ; GFX1010-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX1010-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3739,7 +3743,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX1030-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 ; GFX1030-PAL-NEXT: v_mov_b32_e32 v0, 13 ; GFX1030-PAL-NEXT: v_mov_b32_e32 v1, 15 -; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3804 +; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3800 +; GFX1030-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX1030-PAL-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX1030-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX1030-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3785,10 
+3790,12 @@ define void @store_load_large_imm_offset_foo() { ; GFX9-LABEL: store_load_large_imm_offset_foo: ; GFX9: ; %bb.0: ; %bb ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3000 ; GFX9-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-NEXT: s_add_i32 s1, s32, s0 ; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: s_add_i32 s0, s32, 0x3004 +; GFX9-NEXT: s_add_i32 s0, s1, 4 ; GFX9-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -3800,8 +3807,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10: ; %bb.0: ; %bb ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-NEXT: s_movk_i32 s0, 0x3800 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_add_i32 s0, s32, 0x3804 +; GFX10-NEXT: s_add_i32 s1, s32, s0 +; GFX10-NEXT: s_add_i32 s0, s1, 4 ; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3843,10 +3852,12 @@ define void @store_load_large_imm_offset_foo() { ; GFX9-PAL-LABEL: store_load_large_imm_offset_foo: ; GFX9-PAL: ; %bb.0: ; %bb ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-PAL-NEXT: s_add_i32 s1, s32, s0 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) -; GFX9-PAL-NEXT: s_add_i32 s0, s32, 0x3004 +; GFX9-PAL-NEXT: s_add_i32 s0, s1, 4 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) @@ -3872,8 +3883,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10-PAL: ; %bb.0: ; %bb ; GFX10-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-PAL-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-PAL-NEXT: s_movk_i32 s0, 0x3800 ; GFX10-PAL-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-PAL-NEXT: s_add_i32 s0, s32, 0x3804 +; GFX10-PAL-NEXT: s_add_i32 s1, s32, s0 +; GFX10-PAL-NEXT: s_add_i32 s0, s1, 4 ; GFX10-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 diff --git a/llvm/test/CodeGen/AMDGPU/fmaximum.ll b/llvm/test/CodeGen/AMDGPU/fmaximum.ll index 0adbecd..e59fbad 100644 --- a/llvm/test/CodeGen/AMDGPU/fmaximum.ll +++ b/llvm/test/CodeGen/AMDGPU/fmaximum.ll @@ -173,8 +173,8 @@ define amdgpu_ps <3 x half> @test_fmaximum_v3f16_vv(<3 x half> %a, <3 x half> %b ; ; GFX12-GISEL-TRUE16-LABEL: test_fmaximum_v3f16_vv: ; GFX12-GISEL-TRUE16: ; %bb.0: -; GFX12-GISEL-TRUE16-NEXT: v_maximum_f16 v1.l, v1.l, v3.l ; GFX12-GISEL-TRUE16-NEXT: v_pk_maximum_f16 v0, v0, v2 +; GFX12-GISEL-TRUE16-NEXT: v_maximum_f16 v1.l, v1.l, v3.l ; GFX12-GISEL-TRUE16-NEXT: ; return to shader part epilog ; ; GFX12-GISEL-FAKE16-LABEL: test_fmaximum_v3f16_vv: diff --git a/llvm/test/CodeGen/AMDGPU/fminimum.ll b/llvm/test/CodeGen/AMDGPU/fminimum.ll index e1d35b5..b25120f 100644 --- a/llvm/test/CodeGen/AMDGPU/fminimum.ll +++ b/llvm/test/CodeGen/AMDGPU/fminimum.ll @@ -173,8 +173,8 @@ define amdgpu_ps <3 x half> @test_fminimum_v3f16_vv(<3 x half> %a, <3 x half> %b ; ; GFX12-GISEL-TRUE16-LABEL: test_fminimum_v3f16_vv: ; GFX12-GISEL-TRUE16: ; %bb.0: -; GFX12-GISEL-TRUE16-NEXT: v_minimum_f16 v1.l, v1.l, v3.l ; GFX12-GISEL-TRUE16-NEXT: v_pk_minimum_f16 v0, v0, v2 +; GFX12-GISEL-TRUE16-NEXT: v_minimum_f16 v1.l, v1.l, v3.l ; 
GFX12-GISEL-TRUE16-NEXT: ; return to shader part epilog ; ; GFX12-GISEL-FAKE16-LABEL: test_fminimum_v3f16_vv: diff --git a/llvm/test/CodeGen/AMDGPU/fmuladd.f32.ll b/llvm/test/CodeGen/AMDGPU/fmuladd.f32.ll index ceacdf5..cbda062 100644 --- a/llvm/test/CodeGen/AMDGPU/fmuladd.f32.ll +++ b/llvm/test/CodeGen/AMDGPU/fmuladd.f32.ll @@ -1,45 +1,184 @@ -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -mattr=+fast-fmaf -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-FLUSH,GCN-FLUSH-MAD,SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=tahiti -denormal-fp-math-f32=ieee -mattr=+fast-fmaf -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-DENORM-STRICT,SI-DENORM,GCN-DENORM-FASTFMA,SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=verde -denormal-fp-math-f32=preserve-sign -mattr=-fast-fmaf -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-FLUSH,GCN-FLUSH-MAD,SI-FLUSH,SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=verde -denormal-fp-math-f32=ieee -mattr=-fast-fmaf -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-DENORM-STRICT,SI-DENORM,GCN-DENORM-SLOWFMA,SI %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -mattr=+fast-fmaf -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=SI-FLUSH %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=verde -denormal-fp-math-f32=preserve-sign -mattr=-fast-fmaf -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=SI-FLUSH %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=tahiti -denormal-fp-math-f32=ieee -mattr=+fast-fmaf -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=SI-DENORM-FASTFMA,SI-DENORM-FASTFMA-STRICT %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=verde -denormal-fp-math-f32=ieee -mattr=-fast-fmaf -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=SI-DENORM-SLOWFMA %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -mattr=+fast-fmaf -fp-contract=fast < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-FLUSH,GCN-FLUSH-MAD,SI-FLUSH,SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=tahiti -denormal-fp-math-f32=ieee -mattr=+fast-fmaf -fp-contract=fast < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI-DENORM,GCN-DENORM-FASTFMA,GCN-DENORM-FASTFMA-CONTRACT,SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=verde -denormal-fp-math-f32=preserve-sign -mattr=-fast-fmaf -fp-contract=fast < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-FLUSH,GCN-FLUSH-MAD,SI-FLUSH,SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=verde -denormal-fp-math-f32=ieee -mattr=-fast-fmaf -fp-contract=fast < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI-DENORM,GCN-DENORM-SLOWFMA,GCN-DENORM-SLOWFMA-CONTRACT,SI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -mattr=+fast-fmaf -fp-contract=fast < %s | FileCheck -enable-var-scope -check-prefixes=SI-FLUSH %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=verde -denormal-fp-math-f32=preserve-sign -mattr=-fast-fmaf -fp-contract=fast < %s | FileCheck -enable-var-scope -check-prefixes=SI-FLUSH %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=tahiti 
-denormal-fp-math-f32=ieee -mattr=+fast-fmaf -fp-contract=fast < %s | FileCheck -enable-var-scope -check-prefixes=SI-DENORM-FASTFMA,SI-DENORM-FASTFMA-CONTRACT %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=verde -denormal-fp-math-f32=ieee -mattr=-fast-fmaf -fp-contract=fast < %s | FileCheck -enable-var-scope -check-prefixes=SI-DENORM-SLOWFMA %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx900 -denormal-fp-math-f32=preserve-sign -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-FLUSH,GFX9-FLUSH-MAD %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx900 -denormal-fp-math-f32=ieee -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-DENORM,GFX9-DENORM-FASTFMA-MAD %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx900 -denormal-fp-math-f32=preserve-sign -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-FLUSH,GCN-FLUSH-MAD %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx900 -denormal-fp-math-f32=ieee -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-DENORM-STRICT,GCN-DENORM-FASTFMA %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx906 -denormal-fp-math-f32=preserve-sign -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-FLUSH,GFX9-FLUSH-FMAC %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx906 -denormal-fp-math-f32=ieee -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-DENORM,GFX9-DENORM-FASTFMA-FMAC %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx906 -denormal-fp-math-f32=preserve-sign -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-FLUSH,GCN-FLUSH-FMAC %s - -; FIXME: Should probably test this, but sometimes selecting fmac is painful to match. -; XUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx906 -denormal-fp-math-f32=ieee -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-DENORM-STRICT,GCN-DENORM-FASTFMA %s - -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx1030 -denormal-fp-math-f32=preserve-sign -mattr=+mad-mac-f32-insts -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-FLUSH,GCN-FLUSH-FMAC %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx1030 -denormal-fp-math-f32=ieee -mattr=+mad-mac-f32-insts -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GCN-DENORM-STRICT %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx1030 -denormal-fp-math-f32=preserve-sign -mattr=+mad-mac-f32-insts -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GFX10,GFX10-FLUSH %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mcpu=gfx1030 -denormal-fp-math-f32=ieee -mattr=+mad-mac-f32-insts -fp-contract=on < %s | FileCheck -enable-var-scope -check-prefixes=GFX10,GFX10-DENORM %s ; Test all permutations of: fp32 denormals, fast fp contract, fp contract enabled for fmuladd, fmaf fast/slow. 
target triple = "amdgcn--" - declare i32 @llvm.amdgcn.workitem.id.x() #1 declare float @llvm.fmuladd.f32(float, float, float) #1 declare half @llvm.fmuladd.f16(half, half, half) #1 declare float @llvm.fabs.f32(float) #1 -; GCN-LABEL: {{^}}fmuladd_f32: -; GCN-FLUSH-MAD: v_mac_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}} -; GCN-FLUSH-FMAC: v_fmac_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}} - -; GCN-DENORM-FASTFMA: v_fma_f32 {{v[0-9]+, v[0-9]+, v[0-9]+}} - -; GCN-DENORM-SLOWFMA: v_mul_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}} -; GCN-DENORM-SLOWFMA: v_add_f32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}} -define amdgpu_kernel void @fmuladd_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, - ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { +define amdgpu_kernel void @fmuladd_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { +; SI-FLUSH-LABEL: fmuladd_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s11, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s10, -1 +; SI-FLUSH-NEXT: s_mov_b32 s14, s10 +; SI-FLUSH-NEXT: s_mov_b32 s15, s11 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b32 s12, s2 +; SI-FLUSH-NEXT: s_mov_b32 s13, s3 +; SI-FLUSH-NEXT: s_mov_b32 s16, s4 +; SI-FLUSH-NEXT: s_mov_b32 s17, s5 +; SI-FLUSH-NEXT: s_mov_b32 s18, s10 +; SI-FLUSH-NEXT: s_mov_b32 s19, s11 +; SI-FLUSH-NEXT: s_mov_b32 s4, s6 +; SI-FLUSH-NEXT: s_mov_b32 s5, s7 +; SI-FLUSH-NEXT: s_mov_b32 s6, s10 +; SI-FLUSH-NEXT: s_mov_b32 s7, s11 +; SI-FLUSH-NEXT: buffer_load_dword v0, off, s[12:15], 0 +; SI-FLUSH-NEXT: buffer_load_dword v1, off, s[16:19], 0 +; SI-FLUSH-NEXT: buffer_load_dword v2, off, s[4:7], 0 +; SI-FLUSH-NEXT: s_mov_b32 s8, s0 +; SI-FLUSH-NEXT: s_mov_b32 s9, s1 +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 v2, v0, v1 +; SI-FLUSH-NEXT: buffer_store_dword v2, off, s[8:11], 0 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-LABEL: fmuladd_f32: +; SI-DENORM-FASTFMA: ; %bb.0: +; SI-DENORM-FASTFMA-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s11, 0xf000 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s10, -1 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s14, s10 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s15, s11 +; SI-DENORM-FASTFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s12, s2 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s13, s3 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s16, s4 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s17, s5 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s18, s10 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s19, s11 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s4, s6 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s5, s7 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s6, s10 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s7, s11 +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v0, off, s[12:15], 0 +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v1, off, s[16:19], 0 +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v2, off, s[4:7], 0 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s8, s0 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s9, s1 +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: v_fma_f32 v0, v0, v1, v2 +; SI-DENORM-FASTFMA-NEXT: buffer_store_dword v0, off, s[8:11], 0 +; SI-DENORM-FASTFMA-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmuladd_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s11, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s10, -1 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s14, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s15, s11 +; 
SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s12, s2 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s13, s3 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s16, s4 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s17, s5 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s18, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s19, s11 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s4, s6 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s5, s7 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s6, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, s11 +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v0, off, s[12:15], 0 +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v1, off, s[16:19], 0 +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, off, s[4:7], 0 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s8, s0 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s9, s1 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(1) +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e32 v0, v0, v1 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v0, v0, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v0, off, s[8:11], 0 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; GFX9-FLUSH-MAD-LABEL: fmuladd_f32: +; GFX9-FLUSH-MAD: ; %bb.0: +; GFX9-FLUSH-MAD-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-FLUSH-MAD-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-FLUSH-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v1, v0, s[10:11] +; GFX9-FLUSH-MAD-NEXT: global_load_dword v2, v0, s[12:13] +; GFX9-FLUSH-MAD-NEXT: global_load_dword v3, v0, s[14:15] +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: v_mac_f32_e32 v3, v1, v2 +; GFX9-FLUSH-MAD-NEXT: global_store_dword v0, v3, s[8:9] +; GFX9-FLUSH-MAD-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-MAD-LABEL: fmuladd_f32: +; GFX9-DENORM-FASTFMA-MAD: ; %bb.0: +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v1, v0, s[10:11] +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v2, v0, s[12:13] +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v3, v0, s[14:15] +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_fma_f32 v1, v1, v2, v3 +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_store_dword v0, v1, s[8:9] +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_endpgm +; +; GFX9-FLUSH-FMAC-LABEL: fmuladd_f32: +; GFX9-FLUSH-FMAC: ; %bb.0: +; GFX9-FLUSH-FMAC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-FLUSH-FMAC-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v1, v0, s[10:11] +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v2, v0, s[12:13] +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v3, v0, s[14:15] +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: v_fmac_f32_e32 v3, v1, v2 +; GFX9-FLUSH-FMAC-NEXT: global_store_dword v0, v3, s[8:9] +; GFX9-FLUSH-FMAC-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-FMAC-LABEL: fmuladd_f32: +; GFX9-DENORM-FASTFMA-FMAC: ; %bb.0: +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v1, v0, s[10:11] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v2, v0, s[12:13] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v3, v0, s[14:15] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_fmac_f32_e32 v3, v1, v2 +; 
GFX9-DENORM-FASTFMA-FMAC-NEXT: global_store_dword v0, v3, s[8:9] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_endpgm +; +; GFX10-LABEL: fmuladd_f32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24 +; GFX10-NEXT: v_mov_b32_e32 v0, 0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: s_clause 0x2 +; GFX10-NEXT: global_load_dword v1, v0, s[2:3] +; GFX10-NEXT: global_load_dword v2, v0, s[4:5] +; GFX10-NEXT: global_load_dword v3, v0, s[6:7] +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_fmac_f32_e32 v3, v1, v2 +; GFX10-NEXT: global_store_dword v0, v3, s[0:1] +; GFX10-NEXT: s_endpgm %r0 = load float, ptr addrspace(1) %in1 %r1 = load float, ptr addrspace(1) %in2 %r2 = load float, ptr addrspace(1) %in3 @@ -48,18 +187,190 @@ define amdgpu_kernel void @fmuladd_f32(ptr addrspace(1) %out, ptr addrspace(1) % ret void } -; GCN-LABEL: {{^}}fmul_fadd_f32: -; GCN-FLUSH: v_mac_f32 - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 - -; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 - -; GCN-DENORM-STRICT: v_mul_f32_e32 -; GCN-DENORM-STRICT: v_add_f32_e32 -define amdgpu_kernel void @fmul_fadd_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, - ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { +define amdgpu_kernel void @fmul_fadd_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { +; SI-FLUSH-LABEL: fmul_fadd_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s11, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s10, -1 +; SI-FLUSH-NEXT: s_mov_b32 s14, s10 +; SI-FLUSH-NEXT: s_mov_b32 s15, s11 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b32 s12, s2 +; SI-FLUSH-NEXT: s_mov_b32 s13, s3 +; SI-FLUSH-NEXT: s_mov_b32 s16, s4 +; SI-FLUSH-NEXT: s_mov_b32 s17, s5 +; SI-FLUSH-NEXT: s_mov_b32 s18, s10 +; SI-FLUSH-NEXT: s_mov_b32 s19, s11 +; SI-FLUSH-NEXT: s_mov_b32 s4, s6 +; SI-FLUSH-NEXT: s_mov_b32 s5, s7 +; SI-FLUSH-NEXT: s_mov_b32 s6, s10 +; SI-FLUSH-NEXT: s_mov_b32 s7, s11 +; SI-FLUSH-NEXT: buffer_load_dword v0, off, s[12:15], 0 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v1, off, s[16:19], 0 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, off, s[4:7], 0 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: s_mov_b32 s8, s0 +; SI-FLUSH-NEXT: s_mov_b32 s9, s1 +; SI-FLUSH-NEXT: v_mac_f32_e32 v2, v0, v1 +; SI-FLUSH-NEXT: buffer_store_dword v2, off, s[8:11], 0 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: fmul_fadd_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s11, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s10, -1 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s14, s10 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s15, s11 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s12, s2 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s13, s3 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s16, s4 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s17, s5 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s18, s10 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s19, s11 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s4, s6 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s5, s7 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s6, s10 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s7, s11 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v0, off, s[12:15], 0 
glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v1, off, s[16:19], 0 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, off, s[4:7], 0 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s8, s0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s9, s1 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mul_f32_e32 v0, v0, v1 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_add_f32_e32 v0, v0, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v0, off, s[8:11], 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmul_fadd_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s11, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s10, -1 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s14, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s15, s11 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s12, s2 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s13, s3 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s16, s4 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s17, s5 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s18, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s19, s11 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s4, s6 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s5, s7 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s6, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, s11 +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v0, off, s[12:15], 0 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v1, off, s[16:19], 0 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, off, s[4:7], 0 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s8, s0 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s9, s1 +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e32 v0, v0, v1 +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v0, v0, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v0, off, s[8:11], 0 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: fmul_fadd_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s11, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s10, -1 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s14, s10 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s15, s11 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s12, s2 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s13, s3 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s16, s4 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s17, s5 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s18, s10 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s19, s11 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s4, s6 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s5, s7 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s6, s10 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s7, s11 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v0, off, s[12:15], 0 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v1, off, s[16:19], 0 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, off, s[4:7], 0 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s8, s0 +; 
SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s9, s1 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v0, v0, v1, v2 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v0, off, s[8:11], 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: fmul_fadd_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[10:11] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[12:13] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v3, v0, s[14:15] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mac_f32_e32 v3, v1, v2 +; GFX9-FLUSH-NEXT: global_store_dword v0, v3, s[8:9] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: fmul_fadd_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[10:11] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[12:13] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v3, v0, s[14:15] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[8:9] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: fmul_fadd_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_mov_b32_e32 v0, 0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[4:5] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v3, v0, s[6:7] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mac_f32_e32 v3, v1, v2 +; GFX10-FLUSH-NEXT: global_store_dword v0, v3, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: fmul_fadd_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_mov_b32_e32 v0, 0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[4:5] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v3, v0, s[6:7] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %r0 = load volatile float, ptr addrspace(1) %in1 %r1 = load volatile float, ptr addrspace(1) %in2 %r2 = load volatile float, ptr addrspace(1) %in3 @@ -69,15 +380,172 @@ define amdgpu_kernel void @fmul_fadd_f32(ptr addrspace(1) %out, ptr addrspace(1) ret void } -; GCN-LABEL: {{^}}fmul_fadd_contract_f32: -; GCN-FLUSH-FMAC: v_fmac_f32_e32 - -; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 - -; GCN-DENORM-FASTFMA: v_fma_f32 -define amdgpu_kernel void @fmul_fadd_contract_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, - ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { +define amdgpu_kernel void @fmul_fadd_contract_f32(ptr 
addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { +; SI-FLUSH-LABEL: fmul_fadd_contract_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s11, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s10, -1 +; SI-FLUSH-NEXT: s_mov_b32 s14, s10 +; SI-FLUSH-NEXT: s_mov_b32 s15, s11 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b32 s12, s2 +; SI-FLUSH-NEXT: s_mov_b32 s13, s3 +; SI-FLUSH-NEXT: s_mov_b32 s16, s4 +; SI-FLUSH-NEXT: s_mov_b32 s17, s5 +; SI-FLUSH-NEXT: s_mov_b32 s18, s10 +; SI-FLUSH-NEXT: s_mov_b32 s19, s11 +; SI-FLUSH-NEXT: s_mov_b32 s4, s6 +; SI-FLUSH-NEXT: s_mov_b32 s5, s7 +; SI-FLUSH-NEXT: s_mov_b32 s6, s10 +; SI-FLUSH-NEXT: s_mov_b32 s7, s11 +; SI-FLUSH-NEXT: buffer_load_dword v0, off, s[12:15], 0 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v1, off, s[16:19], 0 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, off, s[4:7], 0 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: s_mov_b32 s8, s0 +; SI-FLUSH-NEXT: s_mov_b32 s9, s1 +; SI-FLUSH-NEXT: v_mac_f32_e32 v2, v0, v1 +; SI-FLUSH-NEXT: buffer_store_dword v2, off, s[8:11], 0 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-LABEL: fmul_fadd_contract_f32: +; SI-DENORM-FASTFMA: ; %bb.0: +; SI-DENORM-FASTFMA-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s11, 0xf000 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s10, -1 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s14, s10 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s15, s11 +; SI-DENORM-FASTFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s12, s2 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s13, s3 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s16, s4 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s17, s5 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s18, s10 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s19, s11 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s4, s6 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s5, s7 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s6, s10 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s7, s11 +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v0, off, s[12:15], 0 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v1, off, s[16:19], 0 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v2, off, s[4:7], 0 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s8, s0 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s9, s1 +; SI-DENORM-FASTFMA-NEXT: v_fma_f32 v0, v0, v1, v2 +; SI-DENORM-FASTFMA-NEXT: buffer_store_dword v0, off, s[8:11], 0 +; SI-DENORM-FASTFMA-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmul_fadd_contract_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s11, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s10, -1 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s14, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s15, s11 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s12, s2 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s13, s3 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s16, s4 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s17, s5 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s18, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s19, s11 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s4, s6 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s5, s7 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s6, s10 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, s11 +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v0, off, 
s[12:15], 0 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v1, off, s[16:19], 0 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, off, s[4:7], 0 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s8, s0 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s9, s1 +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e32 v0, v0, v1 +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v0, v0, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v0, off, s[8:11], 0 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; GFX9-FLUSH-MAD-LABEL: fmul_fadd_contract_f32: +; GFX9-FLUSH-MAD: ; %bb.0: +; GFX9-FLUSH-MAD-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-FLUSH-MAD-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-FLUSH-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v1, v0, s[10:11] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v2, v0, s[12:13] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v3, v0, s[14:15] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: v_mac_f32_e32 v3, v1, v2 +; GFX9-FLUSH-MAD-NEXT: global_store_dword v0, v3, s[8:9] +; GFX9-FLUSH-MAD-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-MAD-LABEL: fmul_fadd_contract_f32: +; GFX9-DENORM-FASTFMA-MAD: ; %bb.0: +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v1, v0, s[10:11] glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v2, v0, s[12:13] glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v3, v0, s[14:15] glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_fma_f32 v1, v1, v2, v3 +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_store_dword v0, v1, s[8:9] +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_endpgm +; +; GFX9-FLUSH-FMAC-LABEL: fmul_fadd_contract_f32: +; GFX9-FLUSH-FMAC: ; %bb.0: +; GFX9-FLUSH-FMAC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-FLUSH-FMAC-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v1, v0, s[10:11] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v2, v0, s[12:13] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v3, v0, s[14:15] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: v_fmac_f32_e32 v3, v1, v2 +; GFX9-FLUSH-FMAC-NEXT: global_store_dword v0, v3, s[8:9] +; GFX9-FLUSH-FMAC-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-FMAC-LABEL: fmul_fadd_contract_f32: +; GFX9-DENORM-FASTFMA-FMAC: ; %bb.0: +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v1, v0, s[10:11] glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v2, v0, s[12:13] glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v3, v0, s[14:15] glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_fmac_f32_e32 v3, v1, v2 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: 
global_store_dword v0, v3, s[8:9] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_endpgm +; +; GFX10-LABEL: fmul_fadd_contract_f32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24 +; GFX10-NEXT: v_mov_b32_e32 v0, 0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_load_dword v2, v0, s[4:5] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_load_dword v3, v0, s[6:7] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_fmac_f32_e32 v3, v1, v2 +; GFX10-NEXT: global_store_dword v0, v3, s[0:1] +; GFX10-NEXT: s_endpgm %r0 = load volatile float, ptr addrspace(1) %in1 %r1 = load volatile float, ptr addrspace(1) %in2 %r2 = load volatile float, ptr addrspace(1) %in3 @@ -87,23 +555,120 @@ define amdgpu_kernel void @fmul_fadd_contract_f32(ptr addrspace(1) %out, ptr add ret void } -; GCN-LABEL: {{^}}fmuladd_2.0_a_b_f32 -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], - -; GCN-FLUSH-MAD: v_mac_f32_e32 [[R2]], 2.0, [[R1]] -; GCN-FLUSH-FMAC: v_fmac_f32_e32 [[R2]], 2.0, [[R1]] -; SI-FLUSH: buffer_store_dword [[R2]] -; VI-FLUSH: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[R2]] - -; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]] - -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @fmuladd_2.0_a_b_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { +; SI-FLUSH-LABEL: fmuladd_2.0_a_b_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 v3, 2.0, v2 +; SI-FLUSH-NEXT: buffer_store_dword v3, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-LABEL: fmuladd_2.0_a_b_f32: +; SI-DENORM-FASTFMA: ; %bb.0: +; SI-DENORM-FASTFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: v_fma_f32 v2, v2, 2.0, v3 +; SI-DENORM-FASTFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmuladd_2.0_a_b_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt 
lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; GFX9-FLUSH-MAD-LABEL: fmuladd_2.0_a_b_f32: +; GFX9-FLUSH-MAD: ; %bb.0: +; GFX9-FLUSH-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: v_mac_f32_e32 v2, 2.0, v1 +; GFX9-FLUSH-MAD-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-MAD-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-MAD-LABEL: fmuladd_2.0_a_b_f32: +; GFX9-DENORM-FASTFMA-MAD: ; %bb.0: +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_fma_f32 v1, v1, 2.0, v2 +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_endpgm +; +; GFX9-FLUSH-FMAC-LABEL: fmuladd_2.0_a_b_f32: +; GFX9-FLUSH-FMAC: ; %bb.0: +; GFX9-FLUSH-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX9-FLUSH-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-FMAC-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-FMAC-LABEL: fmuladd_2.0_a_b_f32: +; GFX9-DENORM-FASTFMA-FMAC: ; %bb.0: +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_endpgm +; +; GFX10-LABEL: fmuladd_2.0_a_b_f32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX10-NEXT: global_store_dword v0, v2, 
s[0:1] +; GFX10-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -117,24 +682,120 @@ define amdgpu_kernel void @fmuladd_2.0_a_b_f32(ptr addrspace(1) %out, ptr addrsp ret void } -; GCN-LABEL: {{^}}fmuladd_a_2.0_b_f32 -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], - -; GCN-FLUSH-MAD: v_mac_f32_e32 [[R2]], 2.0, [[R1]] -; GCN-FLUSH-FMAC: v_fmac_f32_e32 [[R2]], 2.0, [[R1]] - -; SI-FLUSH: buffer_store_dword [[R2]] -; VI-FLUSH: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[R2]] - -; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]] - -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @fmuladd_a_2.0_b_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { +; SI-FLUSH-LABEL: fmuladd_a_2.0_b_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 v3, 2.0, v2 +; SI-FLUSH-NEXT: buffer_store_dword v3, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-LABEL: fmuladd_a_2.0_b_f32: +; SI-DENORM-FASTFMA: ; %bb.0: +; SI-DENORM-FASTFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: v_fma_f32 v2, v2, 2.0, v3 +; SI-DENORM-FASTFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmuladd_a_2.0_b_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; GFX9-FLUSH-MAD-LABEL: fmuladd_a_2.0_b_f32: +; GFX9-FLUSH-MAD: ; %bb.0: +; 
GFX9-FLUSH-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: v_mac_f32_e32 v2, 2.0, v1 +; GFX9-FLUSH-MAD-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-MAD-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-MAD-LABEL: fmuladd_a_2.0_b_f32: +; GFX9-DENORM-FASTFMA-MAD: ; %bb.0: +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_fma_f32 v1, v1, 2.0, v2 +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_endpgm +; +; GFX9-FLUSH-FMAC-LABEL: fmuladd_a_2.0_b_f32: +; GFX9-FLUSH-FMAC: ; %bb.0: +; GFX9-FLUSH-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX9-FLUSH-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-FMAC-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-FMAC-LABEL: fmuladd_a_2.0_b_f32: +; GFX9-DENORM-FASTFMA-FMAC: ; %bb.0: +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_endpgm +; +; GFX10-LABEL: fmuladd_a_2.0_b_f32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX10-NEXT: global_store_dword v0, v2, s[0:1] +; GFX10-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -148,28 +809,126 @@ define amdgpu_kernel void @fmuladd_a_2.0_b_f32(ptr addrspace(1) %out, ptr addrsp ret void } -; GCN-LABEL: {{^}}fadd_a_a_b_f32: -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], - -; GCN-FLUSH: v_mac_f32_e32 [[R2]], 2.0, [[R1]] - -; SI-FLUSH: buffer_store_dword [[R2]] -; 
VI-FLUSH: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[R2]] - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]] - -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] - -; GCN-DENORM-STRICT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] -define amdgpu_kernel void @fadd_a_a_b_f32(ptr addrspace(1) %out, - ptr addrspace(1) %in1, - ptr addrspace(1) %in2) #0 { +define amdgpu_kernel void @fadd_a_a_b_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 { +; SI-FLUSH-LABEL: fadd_a_a_b_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 v3, 2.0, v2 +; SI-FLUSH-NEXT: buffer_store_dword v3, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: fadd_a_a_b_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_add_f32_e32 v2, v2, v3 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fadd_a_a_b_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: fadd_a_a_b_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s3, 0xf000 +; 
SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, v2, 2.0, v3 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: fadd_a_a_b_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mac_f32_e32 v2, 2.0, v1 +; GFX9-FLUSH-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: fadd_a_a_b_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v1 +; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v2 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: fadd_a_a_b_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mac_f32_e32 v2, 2.0, v1 +; GFX10-FLUSH-NEXT: global_store_dword v0, v2, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: fadd_a_a_b_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v1 +; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v2 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -184,28 +943,126 @@ define amdgpu_kernel void @fadd_a_a_b_f32(ptr addrspace(1) %out, ret void } -; GCN-LABEL: {{^}}fadd_b_a_a_f32: -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], - -; GCN-FLUSH: v_mac_f32_e32 [[R2]], 2.0, [[R1]] - -; SI-FLUSH: buffer_store_dword [[R2]] -; VI-FLUSH: 
{{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[R2]] - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]] - -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] - -; GCN-DENORM-STRICT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] -define amdgpu_kernel void @fadd_b_a_a_f32(ptr addrspace(1) %out, - ptr addrspace(1) %in1, - ptr addrspace(1) %in2) #0 { +define amdgpu_kernel void @fadd_b_a_a_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 { +; SI-FLUSH-LABEL: fadd_b_a_a_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 v3, 2.0, v2 +; SI-FLUSH-NEXT: buffer_store_dword v3, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: fadd_b_a_a_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_add_f32_e32 v2, v3, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fadd_b_a_a_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v3, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: fadd_b_a_a_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: 
s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, v2, 2.0, v3 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: fadd_b_a_a_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mac_f32_e32 v2, 2.0, v1 +; GFX9-FLUSH-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: fadd_b_a_a_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v1 +; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v2, v1 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: fadd_b_a_a_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mac_f32_e32 v2, 2.0, v1 +; GFX10-FLUSH-NEXT: global_store_dword v0, v2, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: fadd_b_a_a_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v1 +; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v2, v1 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -220,20 +1077,120 @@ define amdgpu_kernel void @fadd_b_a_a_f32(ptr addrspace(1) %out, ret void } -; GCN-LABEL: {{^}}fmuladd_neg_2.0_a_b_f32 -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], -; GCN-FLUSH-MAD: v_mac_f32_e32 [[R2]], -2.0, [[R1]] -; GCN-FLUSH-FMAC: v_fmac_f32_e32 [[R2]], -2.0, [[R1]] - -; GCN-DENORM-FASTFMA: 
v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]] - -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @fmuladd_neg_2.0_a_b_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { +; SI-FLUSH-LABEL: fmuladd_neg_2.0_a_b_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 v3, -2.0, v2 +; SI-FLUSH-NEXT: buffer_store_dword v3, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-LABEL: fmuladd_neg_2.0_a_b_f32: +; SI-DENORM-FASTFMA: ; %bb.0: +; SI-DENORM-FASTFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: v_fma_f32 v2, v2, -2.0, v3 +; SI-DENORM-FASTFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmuladd_neg_2.0_a_b_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e32 v2, v3, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; GFX9-FLUSH-MAD-LABEL: fmuladd_neg_2.0_a_b_f32: +; GFX9-FLUSH-MAD: ; %bb.0: +; GFX9-FLUSH-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: v_mac_f32_e32 v2, -2.0, v1 +; GFX9-FLUSH-MAD-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-MAD-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-MAD-LABEL: fmuladd_neg_2.0_a_b_f32: +; GFX9-DENORM-FASTFMA-MAD: ; %bb.0: +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; 
GFX9-DENORM-FASTFMA-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_fma_f32 v1, v1, -2.0, v2 +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_endpgm +; +; GFX9-FLUSH-FMAC-LABEL: fmuladd_neg_2.0_a_b_f32: +; GFX9-FLUSH-FMAC: ; %bb.0: +; GFX9-FLUSH-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: v_fmac_f32_e32 v2, -2.0, v1 +; GFX9-FLUSH-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-FMAC-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-FMAC-LABEL: fmuladd_neg_2.0_a_b_f32: +; GFX9-DENORM-FASTFMA-FMAC: ; %bb.0: +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_fmac_f32_e32 v2, -2.0, v1 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_endpgm +; +; GFX10-LABEL: fmuladd_neg_2.0_a_b_f32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_fmac_f32_e32 v2, -2.0, v1 +; GFX10-NEXT: global_store_dword v0, v2, s[0:1] +; GFX10-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -247,25 +1204,120 @@ define amdgpu_kernel void @fmuladd_neg_2.0_a_b_f32(ptr addrspace(1) %out, ptr ad ret void } -; XXX -; GCN-LABEL: {{^}}fmuladd_neg_2.0_neg_a_b_f32 -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], - -; GCN-FLUSH-MAD: v_mac_f32_e32 [[R2]], 2.0, [[R1]] -; GCN-FLUSH-FMAC: v_fmac_f32_e32 [[R2]], 2.0, [[R1]] - -; SI-FLUSH: buffer_store_dword [[R2]] -; VI-FLUSH: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[R2]] - -; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, [[R2]] - -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @fmuladd_neg_2.0_neg_a_b_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { +; SI-FLUSH-LABEL: fmuladd_neg_2.0_neg_a_b_f32: +; SI-FLUSH: ; %bb.0: +; 
SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 v3, 2.0, v2 +; SI-FLUSH-NEXT: buffer_store_dword v3, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-LABEL: fmuladd_neg_2.0_neg_a_b_f32: +; SI-DENORM-FASTFMA: ; %bb.0: +; SI-DENORM-FASTFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: v_fma_f32 v2, v2, 2.0, v3 +; SI-DENORM-FASTFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmuladd_neg_2.0_neg_a_b_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v3, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; GFX9-FLUSH-MAD-LABEL: fmuladd_neg_2.0_neg_a_b_f32: +; GFX9-FLUSH-MAD: ; %bb.0: +; GFX9-FLUSH-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: v_mac_f32_e32 v2, 2.0, v1 +; GFX9-FLUSH-MAD-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-MAD-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-MAD-LABEL: fmuladd_neg_2.0_neg_a_b_f32: +; GFX9-DENORM-FASTFMA-MAD: ; %bb.0: +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_fma_f32 v1, v1, 2.0, v2 +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_store_dword v0, v1, s[0:1] +; 
GFX9-DENORM-FASTFMA-MAD-NEXT: s_endpgm +; +; GFX9-FLUSH-FMAC-LABEL: fmuladd_neg_2.0_neg_a_b_f32: +; GFX9-FLUSH-FMAC: ; %bb.0: +; GFX9-FLUSH-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX9-FLUSH-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-FMAC-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-FMAC-LABEL: fmuladd_neg_2.0_neg_a_b_f32: +; GFX9-DENORM-FASTFMA-FMAC: ; %bb.0: +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_endpgm +; +; GFX10-LABEL: fmuladd_neg_2.0_neg_a_b_f32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_fmac_f32_e32 v2, 2.0, v1 +; GFX10-NEXT: global_store_dword v0, v2, s[0:1] +; GFX10-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -281,24 +1333,120 @@ define amdgpu_kernel void @fmuladd_neg_2.0_neg_a_b_f32(ptr addrspace(1) %out, pt ret void } -; GCN-LABEL: {{^}}fmuladd_2.0_neg_a_b_f32: -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], - -; GCN-FLUSH-MAD: v_mac_f32_e32 [[R2]], -2.0, [[R1]] -; GCN-FLUSH-FMAC: v_fmac_f32_e32 [[R2]], -2.0, [[R1]] - -; SI-FLUSH: buffer_store_dword [[R2]] -; VI-FLUSH: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[R2]] - -; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]] - -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @fmuladd_2.0_neg_a_b_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { +; SI-FLUSH-LABEL: fmuladd_2.0_neg_a_b_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 
v3, -2.0, v2 +; SI-FLUSH-NEXT: buffer_store_dword v3, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-LABEL: fmuladd_2.0_neg_a_b_f32: +; SI-DENORM-FASTFMA: ; %bb.0: +; SI-DENORM-FASTFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: v_fma_f32 v2, v2, -2.0, v3 +; SI-DENORM-FASTFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmuladd_2.0_neg_a_b_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e32 v2, v3, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; GFX9-FLUSH-MAD-LABEL: fmuladd_2.0_neg_a_b_f32: +; GFX9-FLUSH-MAD: ; %bb.0: +; GFX9-FLUSH-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: v_mac_f32_e32 v2, -2.0, v1 +; GFX9-FLUSH-MAD-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-MAD-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-MAD-LABEL: fmuladd_2.0_neg_a_b_f32: +; GFX9-DENORM-FASTFMA-MAD: ; %bb.0: +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-MAD-NEXT: v_fma_f32 v1, v1, -2.0, v2 +; GFX9-DENORM-FASTFMA-MAD-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-FASTFMA-MAD-NEXT: s_endpgm +; +; GFX9-FLUSH-FMAC-LABEL: fmuladd_2.0_neg_a_b_f32: +; GFX9-FLUSH-FMAC: ; %bb.0: +; GFX9-FLUSH-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; 
GFX9-FLUSH-FMAC-NEXT: v_fmac_f32_e32 v2, -2.0, v1 +; GFX9-FLUSH-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-FMAC-NEXT: s_endpgm +; +; GFX9-DENORM-FASTFMA-FMAC-LABEL: fmuladd_2.0_neg_a_b_f32: +; GFX9-DENORM-FASTFMA-FMAC: ; %bb.0: +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-FASTFMA-FMAC-NEXT: v_fmac_f32_e32 v2, -2.0, v1 +; GFX9-DENORM-FASTFMA-FMAC-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-DENORM-FASTFMA-FMAC-NEXT: s_endpgm +; +; GFX10-LABEL: fmuladd_2.0_neg_a_b_f32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_fmac_f32_e32 v2, -2.0, v1 +; GFX10-NEXT: global_store_dword v0, v2, s[0:1] +; GFX10-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -314,23 +1462,107 @@ define amdgpu_kernel void @fmuladd_2.0_neg_a_b_f32(ptr addrspace(1) %out, ptr ad ret void } -; GCN-LABEL: {{^}}fmuladd_2.0_a_neg_b_f32: -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], -; GCN-FLUSH-MAD: v_mad_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]] -; GCN-FLUSH-FMAC: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]] - -; SI-FLUSH: buffer_store_dword [[RESULT]] -; VI-FLUSH: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] - -; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]] - -; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @fmuladd_2.0_a_neg_b_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { +; SI-FLUSH-LABEL: fmuladd_2.0_a_neg_b_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mad_f32 v2, v2, 2.0, -v3 +; SI-FLUSH-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-LABEL: fmuladd_2.0_a_neg_b_f32: +; SI-DENORM-FASTFMA: ; %bb.0: +; SI-DENORM-FASTFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-NEXT: 
s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-NEXT: v_fma_f32 v2, v2, 2.0, -v3 +; SI-DENORM-FASTFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fmuladd_2.0_a_neg_b_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; GFX9-FLUSH-MAD-LABEL: fmuladd_2.0_a_neg_b_f32: +; GFX9-FLUSH-MAD: ; %bb.0: +; GFX9-FLUSH-MAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-MAD-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-MAD-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-MAD-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-MAD-NEXT: v_mad_f32 v1, v1, 2.0, -v2 +; GFX9-FLUSH-MAD-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-FLUSH-MAD-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: fmuladd_2.0_a_neg_b_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_fma_f32 v1, v1, 2.0, -v2 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX9-FLUSH-FMAC-LABEL: fmuladd_2.0_a_neg_b_f32: +; GFX9-FLUSH-FMAC: ; %bb.0: +; GFX9-FLUSH-FMAC-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-FMAC-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-FMAC-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-FMAC-NEXT: v_fma_f32 v1, v1, 2.0, -v2 +; GFX9-FLUSH-FMAC-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-FLUSH-FMAC-NEXT: s_endpgm +; +; GFX10-LABEL: fmuladd_2.0_a_neg_b_f32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_fma_f32 v1, v1, 2.0, -v2 +; GFX10-NEXT: global_store_dword v0, v1, s[0:1] +; 
GFX10-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -346,23 +1578,150 @@ define amdgpu_kernel void @fmuladd_2.0_a_neg_b_f32(ptr addrspace(1) %out, ptr ad ret void } -; GCN-LABEL: {{^}}mad_sub_f32: -; GCN: {{buffer|flat|global}}_load_dword [[REGA:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGB:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGC:v[0-9]+]] -; GCN-FLUSH: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -[[REGC]] - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -[[REGC]] - -; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]] - -; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]] - -; SI: buffer_store_dword [[RESULT]] -; VI: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @mad_sub_f32(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture readonly %ptr) #0 { +; SI-FLUSH-LABEL: mad_sub_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s7, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s6, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-FLUSH-NEXT: v_mad_f32 v2, v2, v3, -v4 +; SI-FLUSH-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: mad_sub_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_sub_f32_e32 v2, v2, v4 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: mad_sub_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 
s6, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e32 v2, v2, v4 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: mad_sub_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, v2, v3, -v4 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: mad_sub_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mad_f32 v1, v1, v2, -v3 +; GFX9-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: mad_sub_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX9-DENORM-NEXT: v_sub_f32_e32 v1, v1, v3 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: mad_sub_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: 
v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mad_f32 v1, v1, v2, -v3 +; GFX10-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: mad_sub_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX10-DENORM-NEXT: v_sub_f32_e32 v1, v1, v3 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, ptr addrspace(1) %ptr, i64 %tid.ext @@ -380,24 +1739,150 @@ define amdgpu_kernel void @mad_sub_f32(ptr addrspace(1) noalias nocapture %out, ret void } -; GCN-LABEL: {{^}}mad_sub_inv_f32: -; GCN: {{buffer|flat|global}}_load_dword [[REGA:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGB:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGC:v[0-9]+]] - -; GCN-FLUSH: v_mad_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], [[REGC]] - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], [[REGC]] - -; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]] - -; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]] - -; SI: buffer_store_dword [[RESULT]] -; VI: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @mad_sub_inv_f32(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture readonly %ptr) #0 { +; SI-FLUSH-LABEL: mad_sub_inv_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s7, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s6, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-FLUSH-NEXT: v_mad_f32 v2, -v2, v3, v4 +; SI-FLUSH-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: mad_sub_inv_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s7, 0xf000 +; 
SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_sub_f32_e32 v2, v4, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: mad_sub_inv_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e32 v2, v4, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: mad_sub_inv_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, -v2, v3, v4 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: mad_sub_inv_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: 
global_load_dword v1, v0, s[2:3] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mad_f32 v1, -v1, v2, v3 +; GFX9-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: mad_sub_inv_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX9-DENORM-NEXT: v_sub_f32_e32 v1, v3, v1 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: mad_sub_inv_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mad_f32 v1, -v1, v2, v3 +; GFX10-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: mad_sub_inv_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX10-DENORM-NEXT: v_sub_f32_e32 v1, v3, v1 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, ptr addrspace(1) %ptr, i64 %tid.ext @@ -415,23 +1900,150 @@ define amdgpu_kernel void @mad_sub_inv_f32(ptr addrspace(1) noalias nocapture %o ret void } -; GCN-LABEL: {{^}}mad_sub_fabs_f32: -; GCN: {{buffer|flat|global}}_load_dword [[REGA:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGB:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGC:v[0-9]+]] -; GCN-FLUSH: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -|[[REGC]]| - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -|[[REGC]]| - -; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e64 [[RESULT:v[0-9]+]], [[TMP]], |[[REGC]]| - -; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-STRICT: v_sub_f32_e64 [[RESULT:v[0-9]+]], [[TMP]], |[[REGC]]| - -; SI: buffer_store_dword [[RESULT]] -; VI: 
{{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @mad_sub_fabs_f32(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture readonly %ptr) #0 { +; SI-FLUSH-LABEL: mad_sub_fabs_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s7, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s6, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-FLUSH-NEXT: v_mad_f32 v2, v2, v3, -|v4| +; SI-FLUSH-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: mad_sub_fabs_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_sub_f32_e64 v2, v2, |v4| +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: mad_sub_fabs_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e64 v2, v2, |v4| +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: mad_sub_fabs_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx4 
s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, v2, v3, -|v4| +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: mad_sub_fabs_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mad_f32 v1, v1, v2, -|v3| +; GFX9-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: mad_sub_fabs_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX9-DENORM-NEXT: v_sub_f32_e64 v1, v1, |v3| +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: mad_sub_fabs_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mad_f32 v1, v1, v2, -|v3| +; GFX10-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: mad_sub_fabs_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: 
global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX10-DENORM-NEXT: v_sub_f32_e64 v1, v1, |v3| +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, ptr addrspace(1) %ptr, i64 %tid.ext @@ -450,24 +2062,150 @@ define amdgpu_kernel void @mad_sub_fabs_f32(ptr addrspace(1) noalias nocapture % ret void } -; GCN-LABEL: {{^}}mad_sub_fabs_inv_f32: -; GCN: {{buffer|flat|global}}_load_dword [[REGA:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGB:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGC:v[0-9]+]] -; GCN-FLUSH-MAD: v_mad_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], |[[REGC]]| -; GCN-FLUSH-FMA: v_fma_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], |[[REGC]]| - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], |[[REGC]]| - -; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e64 [[RESULT:v[0-9]+]], |[[REGC]]|, [[TMP]] - -; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-STRICT: v_sub_f32_e64 [[RESULT:v[0-9]+]], |[[REGC]]|, [[TMP]] - -; SI: buffer_store_dword [[RESULT]] -; VI: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @mad_sub_fabs_inv_f32(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture readonly %ptr) #0 { +; SI-FLUSH-LABEL: mad_sub_fabs_inv_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s7, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s6, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-FLUSH-NEXT: v_mad_f32 v2, -v2, v3, |v4| +; SI-FLUSH-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: mad_sub_fabs_inv_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-FASTFMA-STRICT-NEXT: 
v_sub_f32_e64 v2, |v4|, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: mad_sub_fabs_inv_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e64 v2, |v4|, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: mad_sub_fabs_inv_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, -v2, v3, |v4| +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: mad_sub_fabs_inv_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mad_f32 v1, -v1, v2, |v3| +; GFX9-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: mad_sub_fabs_inv_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; 
GFX9-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX9-DENORM-NEXT: v_sub_f32_e64 v1, |v3|, v1 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: mad_sub_fabs_inv_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mad_f32 v1, -v1, v2, |v3| +; GFX10-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: mad_sub_fabs_inv_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX10-DENORM-NEXT: v_sub_f32_e64 v1, |v3|, v1 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, ptr addrspace(1) %ptr, i64 %tid.ext @@ -486,26 +2224,150 @@ define amdgpu_kernel void @mad_sub_fabs_inv_f32(ptr addrspace(1) noalias nocaptu ret void } -; GCN-LABEL: {{^}}neg_neg_mad_f32: -; GCN: {{buffer|flat|global}}_load_dword [[REGA:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGB:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGC:v[0-9]+]] - -; GCN-FLUSH: v_mac_f32_e32 [[REGC]], [[REGA]], [[REGB]] -; SI-FLUSH: buffer_store_dword [[REGC]] -; VI-FLUSH: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[REGC]] - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], [[REGC]] - -; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]] - -; GCN-DENORM-STRICT: v_mul_f32_e32 [[TMP:v[0-9]+]], [[REGA]], [[REGB]] -; GCN-DENORM-STRICT: v_add_f32_e32 [[RESULT:v[0-9]+]], [[REGC]], [[TMP]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @neg_neg_mad_f32(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture readonly %ptr) #0 { +; SI-FLUSH-LABEL: neg_neg_mad_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s7, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s6, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 
glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-FLUSH-NEXT: v_mac_f32_e32 v4, v2, v3 +; SI-FLUSH-NEXT: buffer_store_dword v4, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: neg_neg_mad_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_add_f32_e32 v2, v4, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: neg_neg_mad_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v4, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: neg_neg_mad_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 
offset:8 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, v2, v3, v4 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: neg_neg_mad_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mac_f32_e32 v3, v1, v2 +; GFX9-FLUSH-NEXT: global_store_dword v0, v3, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: neg_neg_mad_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v3, v1 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: neg_neg_mad_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mac_f32_e32 v3, v1, v2 +; GFX10-FLUSH-NEXT: global_store_dword v0, v3, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: neg_neg_mad_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_mul_f32_e32 v1, v1, v2 +; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v3, v1 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, ptr addrspace(1) %ptr, i64 %tid.ext @@ -525,23 +2387,150 @@ define amdgpu_kernel void @neg_neg_mad_f32(ptr addrspace(1) noalias nocapture %o ret void } -; GCN-LABEL: {{^}}mad_fabs_sub_f32: -; GCN: {{buffer|flat|global}}_load_dword [[REGA:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGB:v[0-9]+]] -; GCN: {{buffer|flat|global}}_load_dword [[REGC:v[0-9]+]] -; 
GCN-FLUSH: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], |[[REGB]]|, -[[REGC]] - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[REGA]], |[[REGB]]|, -[[REGC]] - -; GCN-DENORM-SLOWFMA-CONTRACT: v_mul_f32_e64 [[TMP:v[0-9]+]], [[REGA]], |[[REGB]]| -; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]] - -; GCN-DENORM-STRICT: v_mul_f32_e64 [[TMP:v[0-9]+]], [[REGA]], |[[REGB]]| -; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[REGC]] - -; SI: buffer_store_dword [[RESULT]] -; VI: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @mad_fabs_sub_f32(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture readonly %ptr) #0 { +; SI-FLUSH-LABEL: mad_fabs_sub_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s7, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s6, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-FLUSH-NEXT: v_mad_f32 v2, v2, |v3|, -v4 +; SI-FLUSH-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: mad_fabs_sub_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mul_f32_e64 v2, v2, |v3| +; SI-DENORM-FASTFMA-STRICT-NEXT: v_sub_f32_e32 v2, v2, v4 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: mad_fabs_sub_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; 
SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-SLOWFMA-NEXT: v_mul_f32_e64 v2, v2, |v3| +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e32 v2, v2, v4 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: mad_fabs_sub_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s7, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s6, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:8 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, v2, |v3|, -v4 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: mad_fabs_sub_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mad_f32 v1, v1, |v2|, -v3 +; GFX9-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: mad_fabs_sub_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_mul_f32_e64 v1, v1, |v2| +; GFX9-DENORM-NEXT: v_sub_f32_e32 v1, v1, v3 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: mad_fabs_sub_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mad_f32 v1, v1, |v2|, 
-v3 +; GFX10-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: mad_fabs_sub_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[2:3] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[2:3] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v3, v0, s[2:3] offset:8 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_mul_f32_e64 v1, v1, |v2| +; GFX10-DENORM-NEXT: v_sub_f32_e32 v1, v1, v3 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 %tid.ext = sext i32 %tid to i64 %gep0 = getelementptr float, ptr addrspace(1) %ptr, i64 %tid.ext @@ -560,24 +2549,126 @@ define amdgpu_kernel void @mad_fabs_sub_f32(ptr addrspace(1) noalias nocapture % ret void } -; GCN-LABEL: {{^}}fsub_c_fadd_a_a_f32: -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], -; GCN-FLUSH: v_mac_f32_e32 [[R2]], -2.0, [[R1]] -; SI-FLUSH: buffer_store_dword [[R2]] -; VI-FLUSH: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[R2]] - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]] - -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] - -; GCN-DENORM-STRICT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]] - -; SI-DENORM: buffer_store_dword [[RESULT]] -; VI-DENORM: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @fsub_c_fadd_a_a_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { +; SI-FLUSH-LABEL: fsub_c_fadd_a_a_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mac_f32_e32 v3, -2.0, v2 +; SI-FLUSH-NEXT: buffer_store_dword v3, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: fsub_c_fadd_a_a_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_sub_f32_e32 v2, v3, v2 +; 
SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fsub_c_fadd_a_a_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e32 v2, v3, v2 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: fsub_c_fadd_a_a_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, v2, -2.0, v3 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: fsub_c_fadd_a_a_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mac_f32_e32 v2, -2.0, v1 +; GFX9-FLUSH-NEXT: global_store_dword v0, v2, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: fsub_c_fadd_a_a_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v1 +; GFX9-DENORM-NEXT: v_sub_f32_e32 v1, v2, v1 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: fsub_c_fadd_a_a_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mac_f32_e32 v2, -2.0, v1 +; 
GFX10-FLUSH-NEXT: global_store_dword v0, v2, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: fsub_c_fadd_a_a_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v1 +; GFX10-DENORM-NEXT: v_sub_f32_e32 v1, v2, v1 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 @@ -593,22 +2684,126 @@ define amdgpu_kernel void @fsub_c_fadd_a_a_f32(ptr addrspace(1) %out, ptr addrsp ret void } -; GCN-LABEL: {{^}}fsub_fadd_a_a_c_f32: -; GCN: {{buffer|flat|global}}_load_dword [[R1:v[0-9]+]], -; GCN: {{buffer|flat|global}}_load_dword [[R2:v[0-9]+]], -; GCN-FLUSH: v_mad_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]] - -; GCN-DENORM-FASTFMA-CONTRACT: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], 2.0, -[[R2]] - -; GCN-DENORM-SLOWFMA-CONTRACT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-SLOWFMA-CONTRACT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] - -; GCN-DENORM-STRICT: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]] -; GCN-DENORM-STRICT: v_sub_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]] - -; SI: buffer_store_dword [[RESULT]] -; VI: {{global|flat}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]] define amdgpu_kernel void @fsub_fadd_a_a_c_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { +; SI-FLUSH-LABEL: fsub_fadd_a_a_c_f32: +; SI-FLUSH: ; %bb.0: +; SI-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-FLUSH-NEXT: s_mov_b32 s3, 0xf000 +; SI-FLUSH-NEXT: s_mov_b32 s2, 0 +; SI-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-FLUSH-NEXT: v_mov_b32_e32 v1, 0 +; SI-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-FLUSH-NEXT: s_waitcnt vmcnt(0) +; SI-FLUSH-NEXT: v_mad_f32 v2, v2, 2.0, -v3 +; SI-FLUSH-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-FLUSH-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-STRICT-LABEL: fsub_fadd_a_a_c_f32: +; SI-DENORM-FASTFMA-STRICT: ; %bb.0: +; SI-DENORM-FASTFMA-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-STRICT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-STRICT-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-FASTFMA-STRICT-NEXT: v_sub_f32_e32 v2, v2, v3 +; SI-DENORM-FASTFMA-STRICT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-STRICT-NEXT: s_endpgm +; +; SI-DENORM-SLOWFMA-LABEL: fsub_fadd_a_a_c_f32: +; SI-DENORM-SLOWFMA: ; %bb.0: +; 
SI-DENORM-SLOWFMA-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-SLOWFMA-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-SLOWFMA-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-SLOWFMA-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-SLOWFMA-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-SLOWFMA-NEXT: v_add_f32_e32 v2, v2, v2 +; SI-DENORM-SLOWFMA-NEXT: v_sub_f32_e32 v2, v2, v3 +; SI-DENORM-SLOWFMA-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-SLOWFMA-NEXT: s_endpgm +; +; SI-DENORM-FASTFMA-CONTRACT-LABEL: fsub_fadd_a_a_c_f32: +; SI-DENORM-FASTFMA-CONTRACT: ; %bb.0: +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s3, 0xf000 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_mov_b32 s2, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_waitcnt vmcnt(0) +; SI-DENORM-FASTFMA-CONTRACT-NEXT: v_fma_f32 v2, v2, 2.0, -v3 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64 +; SI-DENORM-FASTFMA-CONTRACT-NEXT: s_endpgm +; +; GFX9-FLUSH-LABEL: fsub_fadd_a_a_c_f32: +; GFX9-FLUSH: ; %bb.0: +; GFX9-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX9-FLUSH-NEXT: v_mad_f32 v1, v1, 2.0, -v2 +; GFX9-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-FLUSH-NEXT: s_endpgm +; +; GFX9-DENORM-LABEL: fsub_fadd_a_a_c_f32: +; GFX9-DENORM: ; %bb.0: +; GFX9-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX9-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc +; GFX9-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v1 +; GFX9-DENORM-NEXT: v_sub_f32_e32 v1, v1, v2 +; GFX9-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9-DENORM-NEXT: s_endpgm +; +; GFX10-FLUSH-LABEL: fsub_fadd_a_a_c_f32: +; GFX10-FLUSH: ; %bb.0: +; GFX10-FLUSH-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX10-FLUSH-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-FLUSH-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-FLUSH-NEXT: s_waitcnt vmcnt(0) +; GFX10-FLUSH-NEXT: v_mad_f32 v1, v1, 2.0, -v2 +; GFX10-FLUSH-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-FLUSH-NEXT: s_endpgm +; +; GFX10-DENORM-LABEL: fsub_fadd_a_a_c_f32: +; GFX10-DENORM: ; %bb.0: +; GFX10-DENORM-NEXT: s_load_dwordx2 s[0:1], s[4:5], 
0x24 +; GFX10-DENORM-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX10-DENORM-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v1, v0, s[0:1] glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: global_load_dword v2, v0, s[0:1] offset:4 glc dlc +; GFX10-DENORM-NEXT: s_waitcnt vmcnt(0) +; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v1 +; GFX10-DENORM-NEXT: v_sub_f32_e32 v1, v1, v2 +; GFX10-DENORM-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10-DENORM-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone %gep.0 = getelementptr float, ptr addrspace(1) %out, i32 %tid %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir index 7fad2f4..a88b1ec 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir @@ -75,7 +75,8 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_materializedconst_0 - ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, 256, implicit-def $scc + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 256 + ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]], implicit-def $scc ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:sreg_32 = S_MOV_B32 %stack.0 diff --git a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir index cc43142..2f2d727 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir @@ -46,7 +46,8 @@ body: | %2:sreg_32 = S_LSHL2_ADD_U32 %0, %1, implicit-def $scc ... # GCN-LABEL: name: test_frameindex{{$}} -# GCN: %1:sreg_32 = S_ADD_I32 %stack.0, 70 +# GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 70 +# GCN-NEXT: %1:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]] --- name: test_frameindex tracksRegLiveness: true diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll index 15cda62..f2fe61f 100644 --- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll +++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll @@ -360,7 +360,8 @@ entry: ; s_add_i32. 
; GCN-LABEL: {{^}}fi_sop2_s_add_u32_literal_error: -; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0, 0x2010 +; GCN: s_movk_i32 [[S_MOVK_I32_:s[0-9]+]], 0x1000 +; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0x1010, [[S_MOVK_I32_]] ; GCN: s_addc_u32 [[ADD_HI:s[0-9]+]], s{{[0-9]+}}, 0 define amdgpu_kernel void @fi_sop2_s_add_u32_literal_error() #0 { entry: diff --git a/llvm/test/CodeGen/AMDGPU/freeze.ll b/llvm/test/CodeGen/AMDGPU/freeze.ll index ac4f0df..308e86b 100644 --- a/llvm/test/CodeGen/AMDGPU/freeze.ll +++ b/llvm/test/CodeGen/AMDGPU/freeze.ll @@ -5692,10 +5692,6 @@ define void @freeze_v3i16(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) { ; GFX6-SDAG-NEXT: s_mov_b32 s5, s6 ; GFX6-SDAG-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 ; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) -; GFX6-SDAG-NEXT: v_lshrrev_b32_e32 v4, 16, v0 -; GFX6-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX6-SDAG-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX6-SDAG-NEXT: v_or_b32_e32 v0, v0, v4 ; GFX6-SDAG-NEXT: buffer_store_short v1, v[2:3], s[4:7], 0 addr64 offset:4 ; GFX6-SDAG-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 ; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) @@ -5725,10 +5721,6 @@ define void @freeze_v3i16(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) { ; GFX7-SDAG-NEXT: s_mov_b32 s5, s6 ; GFX7-SDAG-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 ; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) -; GFX7-SDAG-NEXT: v_lshrrev_b32_e32 v4, 16, v0 -; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX7-SDAG-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-SDAG-NEXT: v_or_b32_e32 v0, v0, v4 ; GFX7-SDAG-NEXT: buffer_store_short v1, v[2:3], s[4:7], 0 addr64 offset:4 ; GFX7-SDAG-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 ; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) @@ -6351,10 +6343,6 @@ define void @freeze_v3f16(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) { ; GFX6-SDAG-NEXT: s_mov_b32 s5, s6 ; GFX6-SDAG-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 ; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) -; GFX6-SDAG-NEXT: v_and_b32_e32 v4, 0xffff, v0 -; GFX6-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; GFX6-SDAG-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX6-SDAG-NEXT: v_or_b32_e32 v0, v4, v0 ; GFX6-SDAG-NEXT: buffer_store_short v1, v[2:3], s[4:7], 0 addr64 offset:4 ; GFX6-SDAG-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 ; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) @@ -6384,10 +6372,6 @@ define void @freeze_v3f16(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) { ; GFX7-SDAG-NEXT: s_mov_b32 s5, s6 ; GFX7-SDAG-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 ; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) -; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xffff, v0 -; GFX7-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; GFX7-SDAG-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX7-SDAG-NEXT: v_or_b32_e32 v0, v4, v0 ; GFX7-SDAG-NEXT: buffer_store_short v1, v[2:3], s[4:7], 0 addr64 offset:4 ; GFX7-SDAG-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 ; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) @@ -12347,14 +12331,9 @@ define void @freeze_v3i8(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) { ; GFX6-SDAG-NEXT: s_mov_b32 s5, s6 ; GFX6-SDAG-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 ; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) -; GFX6-SDAG-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX6-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4 ; GFX6-SDAG-NEXT: v_lshrrev_b32_e32 v1, 16, v0 -; GFX6-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0 -; GFX6-SDAG-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX6-SDAG-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX6-SDAG-NEXT: buffer_store_byte v1, v[2:3], s[4:7], 0 
addr64 offset:2 ; GFX6-SDAG-NEXT: buffer_store_short v0, v[2:3], s[4:7], 0 addr64 +; GFX6-SDAG-NEXT: buffer_store_byte v1, v[2:3], s[4:7], 0 addr64 offset:2 ; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; GFX6-SDAG-NEXT: s_setpc_b64 s[30:31] ; @@ -12392,14 +12371,9 @@ define void @freeze_v3i8(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) { ; GFX7-SDAG-NEXT: s_mov_b32 s5, s6 ; GFX7-SDAG-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 ; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) -; GFX7-SDAG-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX7-SDAG-NEXT: v_and_b32_e32 v4, 0xff, v4 ; GFX7-SDAG-NEXT: v_lshrrev_b32_e32 v1, 16, v0 -; GFX7-SDAG-NEXT: v_and_b32_e32 v0, 0xff, v0 -; GFX7-SDAG-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-SDAG-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-SDAG-NEXT: buffer_store_byte v1, v[2:3], s[4:7], 0 addr64 offset:2 ; GFX7-SDAG-NEXT: buffer_store_short v0, v[2:3], s[4:7], 0 addr64 +; GFX7-SDAG-NEXT: buffer_store_byte v1, v[2:3], s[4:7], 0 addr64 offset:2 ; GFX7-SDAG-NEXT: s_waitcnt vmcnt(0) ; GFX7-SDAG-NEXT: s_setpc_b64 s[30:31] ; @@ -12474,11 +12448,7 @@ define void @freeze_v3i8(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) { ; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-SDAG-NEXT: global_load_dword v0, v[0:1], off ; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) -; GFX10-SDAG-NEXT: v_lshrrev_b16 v1, 8, v0 -; GFX10-SDAG-NEXT: v_lshrrev_b32_e32 v4, 16, v0 -; GFX10-SDAG-NEXT: v_lshlrev_b16 v1, 8, v1 -; GFX10-SDAG-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX10-SDAG-NEXT: global_store_byte v[2:3], v4, off offset:2 +; GFX10-SDAG-NEXT: global_store_byte_d16_hi v[2:3], v0, off offset:2 ; GFX10-SDAG-NEXT: global_store_short v[2:3], v0, off ; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31] ; @@ -12499,36 +12469,15 @@ define void @freeze_v3i8(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) { ; GFX10-GISEL-NEXT: global_store_byte_d16_hi v[2:3], v0, off offset:2 ; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-SDAG-TRUE16-LABEL: freeze_v3i8: -; GFX11-SDAG-TRUE16: ; %bb.0: -; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SDAG-TRUE16-NEXT: global_load_b32 v1, v[0:1], off -; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0 -; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-SDAG-TRUE16-NEXT: v_lshrrev_b16 v0.l, 8, v1.l -; GFX11-SDAG-TRUE16-NEXT: v_and_b16 v0.h, 0xff, v1.l -; GFX11-SDAG-TRUE16-NEXT: v_mov_b16_e32 v4.l, v1.h -; GFX11-SDAG-TRUE16-NEXT: v_lshlrev_b16 v0.l, 8, v0.l -; GFX11-SDAG-TRUE16-NEXT: v_or_b16 v0.l, v0.h, v0.l -; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1 -; GFX11-SDAG-TRUE16-NEXT: global_store_b8 v[2:3], v4, off offset:2 -; GFX11-SDAG-TRUE16-NEXT: global_store_b16 v[2:3], v0, off -; GFX11-SDAG-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SDAG-FAKE16-LABEL: freeze_v3i8: -; GFX11-SDAG-FAKE16: ; %bb.0: -; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SDAG-FAKE16-NEXT: global_load_b32 v0, v[0:1], off -; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b16 v1, 8, v0 -; GFX11-SDAG-FAKE16-NEXT: v_and_b32_e32 v4, 0xff, v0 -; GFX11-SDAG-FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; GFX11-SDAG-FAKE16-NEXT: v_lshlrev_b16 v1, 8, v1 -; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v1, v4, v1 -; GFX11-SDAG-FAKE16-NEXT: s_clause 0x1 -; GFX11-SDAG-FAKE16-NEXT: global_store_b8 v[2:3], v0, off offset:2 -; GFX11-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v1, off -; GFX11-SDAG-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-SDAG-LABEL: freeze_v3i8: +; GFX11-SDAG: ; 
%bb.0: +; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SDAG-NEXT: global_load_b32 v0, v[0:1], off +; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-NEXT: s_clause 0x1 +; GFX11-SDAG-NEXT: global_store_d16_hi_b8 v[2:3], v0, off offset:2 +; GFX11-SDAG-NEXT: global_store_b16 v[2:3], v0, off +; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-GISEL-LABEL: freeze_v3i8: ; GFX11-GISEL: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll index 723e3ef..1602e31 100644 --- a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll +++ b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll @@ -4326,9 +4326,8 @@ define amdgpu_ps <2 x half> @global_load_saddr_i16_d16hi_undef_hi(ptr addrspace( ; ; GFX12-GISEL-TRUE16-LABEL: global_load_saddr_i16_d16hi_undef_hi: ; GFX12-GISEL-TRUE16: ; %bb.0: -; GFX12-GISEL-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3] +; GFX12-GISEL-TRUE16-NEXT: global_load_d16_hi_b16 v0, v0, s[2:3] ; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-GISEL-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; GFX12-GISEL-TRUE16-NEXT: ; return to shader part epilog ; ; GFX12-GISEL-FAKE16-LABEL: global_load_saddr_i16_d16hi_undef_hi: @@ -4366,9 +4365,8 @@ define amdgpu_ps <2 x half> @global_load_saddr_i16_d16hi_undef_hi_immneg128(ptr ; ; GFX12-GISEL-TRUE16-LABEL: global_load_saddr_i16_d16hi_undef_hi_immneg128: ; GFX12-GISEL-TRUE16: ; %bb.0: -; GFX12-GISEL-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3] offset:-128 +; GFX12-GISEL-TRUE16-NEXT: global_load_d16_hi_b16 v0, v0, s[2:3] offset:-128 ; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-GISEL-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; GFX12-GISEL-TRUE16-NEXT: ; return to shader part epilog ; ; GFX12-GISEL-FAKE16-LABEL: global_load_saddr_i16_d16hi_undef_hi_immneg128: diff --git a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll index 1c298014..3001248 100644 --- a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll +++ b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll @@ -6,16 +6,24 @@ define amdgpu_gfx [13 x i32] @issue130120() { ; CHECK: ; %bb.0: ; %bb ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; CHECK-NEXT: v_mov_b32_e32 v0, 0 -; CHECK-NEXT: s_add_i32 s0, s32, 0xf0 -; CHECK-NEXT: s_add_i32 s1, s32, 0xf4 -; CHECK-NEXT: s_add_i32 s2, s32, 0xf8 -; CHECK-NEXT: s_add_i32 s3, s32, 0xfc +; CHECK-NEXT: s_movk_i32 s1, 0xf4 +; CHECK-NEXT: s_movk_i32 s2, 0xf8 +; CHECK-NEXT: s_movk_i32 s3, 0xfc +; CHECK-NEXT: s_movk_i32 s34, 0x100 ; CHECK-NEXT: v_mov_b32_e32 v1, v0 -; CHECK-NEXT: s_add_i32 s34, s32, 0x100 -; CHECK-NEXT: s_add_i32 s35, s32, 0x104 -; CHECK-NEXT: s_add_i32 s36, s32, 0x108 -; CHECK-NEXT: s_add_i32 s37, s32, 0x110 -; CHECK-NEXT: s_add_i32 s38, s32, 0x120 +; CHECK-NEXT: s_movk_i32 s35, 0x104 +; CHECK-NEXT: s_movk_i32 s36, 0x108 +; CHECK-NEXT: s_movk_i32 s37, 0x110 +; CHECK-NEXT: s_movk_i32 s38, 0x120 +; CHECK-NEXT: s_add_i32 s0, s32, 0xf0 +; CHECK-NEXT: s_add_i32 s1, s32, s1 +; CHECK-NEXT: s_add_i32 s2, s32, s2 +; CHECK-NEXT: s_add_i32 s3, s32, s3 +; CHECK-NEXT: s_add_i32 s34, s32, s34 +; CHECK-NEXT: s_add_i32 s35, s32, s35 +; CHECK-NEXT: s_add_i32 s36, s32, s36 +; CHECK-NEXT: s_add_i32 s37, s32, s37 +; CHECK-NEXT: s_add_i32 s38, s32, s38 ; CHECK-NEXT: s_or_b32 s39, s32, 4 ; CHECK-NEXT: s_or_b32 s40, s32, 8 ; CHECK-NEXT: s_or_b32 s41, s32, 12 diff --git a/llvm/test/CodeGen/AMDGPU/kernel-args.ll b/llvm/test/CodeGen/AMDGPU/kernel-args.ll index bad2e60..a2da887 
100644 --- a/llvm/test/CodeGen/AMDGPU/kernel-args.ll +++ b/llvm/test/CodeGen/AMDGPU/kernel-args.ll @@ -1025,67 +1025,74 @@ define amdgpu_kernel void @v3i16_arg(ptr addrspace(1) nocapture %out, <3 x i16> ; ; EG-LABEL: v3i16_arg: ; EG: ; %bb.0: ; %entry -; EG-NEXT: ALU 0, @10, KC0[], KC1[] -; EG-NEXT: TEX 1 @6 -; EG-NEXT: ALU 14, @11, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T3.X, 0 -; EG-NEXT: MEM_RAT MSKOR T2.XW, T0.X +; EG-NEXT: ALU 0, @12, KC0[], KC1[] +; EG-NEXT: TEX 2 @6 +; EG-NEXT: ALU 19, @13, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.X, T7.X, 0 +; EG-NEXT: MEM_RAT MSKOR T5.XW, T8.X ; EG-NEXT: CF_END ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_16 T1.X, T0.X, 44, #3 -; EG-NEXT: VTX_READ_16 T0.X, T0.X, 48, #3 -; EG-NEXT: ALU clause starting at 10: -; EG-NEXT: MOV * T0.X, 0.0, -; EG-NEXT: ALU clause starting at 11: +; EG-NEXT: VTX_READ_16 T6.X, T5.X, 44, #3 +; EG-NEXT: VTX_READ_16 T7.X, T5.X, 46, #3 +; EG-NEXT: VTX_READ_16 T5.X, T5.X, 48, #3 +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: MOV * T5.X, 0.0, +; EG-NEXT: ALU clause starting at 13: ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, ; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00) ; EG-NEXT: AND_INT T1.W, PV.W, literal.x, -; EG-NEXT: AND_INT * T2.W, T0.X, literal.y, +; EG-NEXT: AND_INT * T2.W, T5.X, literal.y, ; EG-NEXT: 3(4.203895e-45), 65535(9.183409e-41) ; EG-NEXT: LSHL * T1.W, PV.W, literal.x, ; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00) -; EG-NEXT: LSHL T2.X, T2.W, PV.W, -; EG-NEXT: LSHL * T2.W, literal.x, PV.W, +; EG-NEXT: LSHL T5.X, T2.W, PV.W, +; EG-NEXT: LSHL * T5.W, literal.x, PV.W, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: MOV T2.Y, 0.0, -; EG-NEXT: MOV * T2.Z, 0.0, -; EG-NEXT: LSHR T0.X, T0.W, literal.x, -; EG-NEXT: LSHR * T3.X, KC0[2].Y, literal.x, +; EG-NEXT: MOV T5.Y, 0.0, +; EG-NEXT: MOV * T5.Z, 0.0, +; EG-NEXT: LSHR T8.X, T0.W, literal.x, +; EG-NEXT: LSHL T0.W, T7.X, literal.y, +; EG-NEXT: AND_INT * T1.W, T6.X, literal.z, +; EG-NEXT: 2(2.802597e-45), 16(2.242078e-44) +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT T6.X, PV.W, PS, +; EG-NEXT: LSHR * T7.X, KC0[2].Y, literal.x, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) ; ; CM-LABEL: v3i16_arg: ; CM: ; %bb.0: ; %entry ; CM-NEXT: ALU 0, @12, KC0[], KC1[] -; CM-NEXT: TEX 0 @8 -; CM-NEXT: ALU 13, @13, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT MSKOR T1.XW, T2.X -; CM-NEXT: ALU 1, @27, KC0[CB0:0-32], KC1[] -; CM-NEXT: TEX 0 @10 -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X +; CM-NEXT: TEX 2 @6 +; CM-NEXT: ALU 19, @13, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT MSKOR T5.XW, T8.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T6.X, T7.X ; CM-NEXT: CF_END -; CM-NEXT: Fetch clause starting at 8: -; CM-NEXT: VTX_READ_16 T1.X, T0.X, 48, #3 -; CM-NEXT: Fetch clause starting at 10: -; CM-NEXT: VTX_READ_16 T0.X, T0.X, 44, #3 +; CM-NEXT: Fetch clause starting at 6: +; CM-NEXT: VTX_READ_16 T6.X, T5.X, 44, #3 +; CM-NEXT: VTX_READ_16 T7.X, T5.X, 46, #3 +; CM-NEXT: VTX_READ_16 T5.X, T5.X, 48, #3 ; CM-NEXT: ALU clause starting at 12: -; CM-NEXT: MOV * T0.X, 0.0, +; CM-NEXT: MOV * T5.X, 0.0, ; CM-NEXT: ALU clause starting at 13: ; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, ; CM-NEXT: 4(5.605194e-45), 0(0.000000e+00) ; CM-NEXT: AND_INT * T1.W, PV.W, literal.x, ; CM-NEXT: 3(4.203895e-45), 0(0.000000e+00) -; CM-NEXT: AND_INT T0.Z, T1.X, literal.x, +; CM-NEXT: AND_INT T0.Z, T5.X, literal.x, ; CM-NEXT: LSHL * T1.W, PV.W, literal.y, ; CM-NEXT: 65535(9.183409e-41), 3(4.203895e-45) -; CM-NEXT: 
LSHL T1.X, PV.Z, PV.W, -; CM-NEXT: LSHL * T1.W, literal.x, PV.W, +; CM-NEXT: LSHL T5.X, PV.Z, PV.W, +; CM-NEXT: LSHL * T5.W, literal.x, PV.W, ; CM-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; CM-NEXT: MOV T1.Y, 0.0, -; CM-NEXT: MOV * T1.Z, 0.0, -; CM-NEXT: LSHR * T2.X, T0.W, literal.x, +; CM-NEXT: MOV T5.Y, 0.0, +; CM-NEXT: MOV * T5.Z, 0.0, +; CM-NEXT: LSHL T0.Z, T7.X, literal.x, +; CM-NEXT: AND_INT * T1.W, T6.X, literal.y, BS:VEC_120/SCL_212 +; CM-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T6.X, PV.Z, PV.W, +; CM-NEXT: LSHR * T7.X, KC0[2].Y, literal.x, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) -; CM-NEXT: ALU clause starting at 27: -; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x, +; CM-NEXT: LSHR * T8.X, T0.W, literal.x, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) entry: store <3 x i16> %in, ptr addrspace(1) %out, align 4 @@ -2669,47 +2676,205 @@ define amdgpu_kernel void @v8i16_arg(ptr addrspace(1) %out, <8 x i16> %in) { ; ; EG-LABEL: v8i16_arg: ; EG: ; %bb.0: ; %entry -; EG-NEXT: ALU 0, @14, KC0[], KC1[] -; EG-NEXT: TEX 3 @6 -; EG-NEXT: ALU 4, @15, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T0.X, 1 +; EG-NEXT: ALU 1, @36, KC0[], KC1[] +; EG-NEXT: TEX 0 @20 +; EG-NEXT: ALU 5, @38, KC0[], KC1[] +; EG-NEXT: TEX 0 @22 +; EG-NEXT: ALU 5, @44, KC0[], KC1[] +; EG-NEXT: TEX 0 @24 +; EG-NEXT: ALU 5, @50, KC0[], KC1[] +; EG-NEXT: TEX 0 @26 +; EG-NEXT: ALU 5, @56, KC0[], KC1[] +; EG-NEXT: TEX 0 @28 +; EG-NEXT: ALU 5, @62, KC0[], KC1[] +; EG-NEXT: TEX 0 @30 +; EG-NEXT: ALU 5, @68, KC0[], KC1[] +; EG-NEXT: TEX 0 @32 +; EG-NEXT: ALU 5, @74, KC0[], KC1[] +; EG-NEXT: TEX 0 @34 +; EG-NEXT: ALU 8, @80, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T7.XYZW, T8.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_16 T1.X, T0.X, 52, #3 -; EG-NEXT: VTX_READ_16 T2.X, T0.X, 54, #3 -; EG-NEXT: VTX_READ_16 T3.X, T0.X, 62, #3 -; EG-NEXT: VTX_READ_16 T0.X, T0.X, 60, #3 -; EG-NEXT: ALU clause starting at 14: -; EG-NEXT: MOV * T0.X, 0.0, -; EG-NEXT: ALU clause starting at 15: -; EG-NEXT: MOV T1.Y, T2.X, -; EG-NEXT: MOV * T1.Z, T0.X, BS:VEC_120/SCL_212 -; EG-NEXT: LSHR T0.X, KC0[2].Y, literal.x, -; EG-NEXT: MOV * T1.W, T3.X, -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: Fetch clause starting at 20: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 66, #3 +; EG-NEXT: Fetch clause starting at 22: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 58, #3 +; EG-NEXT: Fetch clause starting at 24: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 64, #3 +; EG-NEXT: Fetch clause starting at 26: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 56, #3 +; EG-NEXT: Fetch clause starting at 28: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 62, #3 +; EG-NEXT: Fetch clause starting at 30: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 54, #3 +; EG-NEXT: Fetch clause starting at 32: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 60, #3 +; EG-NEXT: Fetch clause starting at 34: +; EG-NEXT: VTX_READ_16 T7.X, T7.X, 52, #3 +; EG-NEXT: ALU clause starting at 36: +; EG-NEXT: MOV * T0.Y, T3.X, +; EG-NEXT: MOV * T7.X, 0.0, +; EG-NEXT: ALU clause starting at 38: +; EG-NEXT: LSHL T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV T3.X, PV.W, +; EG-NEXT: MOV * T0.Y, T5.X, +; EG-NEXT: ALU clause starting at 44: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T8.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: 
MOV T5.X, PV.W, +; EG-NEXT: MOV * T0.Y, T3.X, +; EG-NEXT: ALU clause starting at 50: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T8.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T3.X, PV.W, +; EG-NEXT: MOV * T0.Y, T5.X, +; EG-NEXT: ALU clause starting at 56: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T8.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T0.Y, T2.X, +; EG-NEXT: ALU clause starting at 62: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T8.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T2.X, PV.W, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: ALU clause starting at 68: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T8.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV * T0.Y, T2.X, +; EG-NEXT: ALU clause starting at 74: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T8.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T7.Z, PV.W, PS, +; EG-NEXT: MOV T2.X, PV.Z, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: ALU clause starting at 80: +; EG-NEXT: LSHR T8.X, KC0[2].Y, literal.x, +; EG-NEXT: AND_INT T0.W, T0.Y, literal.y, +; EG-NEXT: AND_INT * T1.W, T7.X, literal.z, +; EG-NEXT: 2(2.802597e-45), -65536(nan) +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T7.X, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.X, +; EG-NEXT: MOV * T7.W, T3.X, +; EG-NEXT: MOV * T7.Y, T5.X, ; ; CM-LABEL: v8i16_arg: ; CM: ; %bb.0: ; %entry -; CM-NEXT: ALU 0, @14, KC0[], KC1[] -; CM-NEXT: TEX 3 @6 -; CM-NEXT: ALU 4, @15, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T1, T0.X +; CM-NEXT: ALU 1, @36, KC0[], KC1[] +; CM-NEXT: TEX 0 @20 +; CM-NEXT: ALU 5, @38, KC0[], KC1[] +; CM-NEXT: TEX 0 @22 +; CM-NEXT: ALU 5, @44, KC0[], KC1[] +; CM-NEXT: TEX 0 @24 +; CM-NEXT: ALU 5, @50, KC0[], KC1[] +; CM-NEXT: TEX 0 @26 +; CM-NEXT: ALU 5, @56, KC0[], KC1[] +; CM-NEXT: TEX 0 @28 +; CM-NEXT: ALU 5, @62, KC0[], KC1[] +; CM-NEXT: TEX 0 @30 +; CM-NEXT: ALU 5, @68, KC0[], KC1[] +; CM-NEXT: TEX 0 @32 +; CM-NEXT: ALU 5, @74, KC0[], KC1[] +; CM-NEXT: TEX 0 @34 +; CM-NEXT: ALU 8, @80, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T7, T8.X ; CM-NEXT: CF_END ; CM-NEXT: PAD -; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_16 T1.X, T0.X, 52, #3 -; CM-NEXT: VTX_READ_16 T2.X, T0.X, 54, #3 -; CM-NEXT: VTX_READ_16 T3.X, T0.X, 62, #3 -; CM-NEXT: VTX_READ_16 T0.X, T0.X, 60, #3 -; CM-NEXT: ALU clause starting at 14: -; CM-NEXT: MOV * T0.X, 0.0, -; CM-NEXT: ALU clause starting at 15: -; CM-NEXT: MOV T1.Y, T2.X, -; CM-NEXT: MOV * T1.Z, T0.X, BS:VEC_120/SCL_212 -; CM-NEXT: LSHR T0.X, KC0[2].Y, literal.x, -; CM-NEXT: MOV * T1.W, T3.X, -; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: Fetch clause starting at 20: +; CM-NEXT: VTX_READ_16 T8.X, T7.X, 66, #3 +; CM-NEXT: Fetch clause starting at 22: +; CM-NEXT: VTX_READ_16 T8.X, T7.X, 58, #3 +; CM-NEXT: Fetch clause starting at 24: +; CM-NEXT: VTX_READ_16 T8.X, T7.X, 64, #3 +; CM-NEXT: Fetch clause starting at 26: +; CM-NEXT: VTX_READ_16 T8.X, T7.X, 56, #3 +; CM-NEXT: Fetch clause starting at 28: +; CM-NEXT: VTX_READ_16 T8.X, T7.X, 62, #3 +; CM-NEXT: Fetch clause starting at 30: +; 
CM-NEXT: VTX_READ_16 T8.X, T7.X, 54, #3 +; CM-NEXT: Fetch clause starting at 32: +; CM-NEXT: VTX_READ_16 T8.X, T7.X, 60, #3 +; CM-NEXT: Fetch clause starting at 34: +; CM-NEXT: VTX_READ_16 T7.X, T7.X, 52, #3 +; CM-NEXT: ALU clause starting at 36: +; CM-NEXT: MOV * T0.Y, T3.X, +; CM-NEXT: MOV * T7.X, 0.0, +; CM-NEXT: ALU clause starting at 38: +; CM-NEXT: LSHL T0.Z, T8.X, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV T3.X, PV.W, +; CM-NEXT: MOV * T0.Y, T5.X, +; CM-NEXT: ALU clause starting at 44: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T8.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T5.X, PV.W, +; CM-NEXT: MOV * T0.Y, T3.X, +; CM-NEXT: ALU clause starting at 50: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T8.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T3.X, PV.W, +; CM-NEXT: MOV * T0.Y, T5.X, +; CM-NEXT: ALU clause starting at 56: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T8.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T5.X, PV.W, +; CM-NEXT: MOV * T0.Y, T2.X, +; CM-NEXT: ALU clause starting at 62: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T8.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T2.X, PV.W, +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: ALU clause starting at 68: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T8.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV * T0.Y, T2.X, +; CM-NEXT: ALU clause starting at 74: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T8.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T7.Z, PV.Z, PV.W, +; CM-NEXT: MOV T2.X, PV.Z, +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: ALU clause starting at 80: +; CM-NEXT: LSHR T8.X, KC0[2].Y, literal.x, +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.y, +; CM-NEXT: AND_INT * T0.W, T7.X, literal.z, +; CM-NEXT: 2(2.802597e-45), -65536(nan) +; CM-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; CM-NEXT: OR_INT * T7.X, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.X, +; CM-NEXT: MOV * T7.W, T3.X, +; CM-NEXT: MOV * T7.Y, T5.X, entry: store <8 x i16> %in, ptr addrspace(1) %out ret void @@ -3453,68 +3618,392 @@ define amdgpu_kernel void @v16i16_arg(ptr addrspace(1) %out, <16 x i16> %in) { ; ; EG-LABEL: v16i16_arg: ; EG: ; %bb.0: ; %entry -; EG-NEXT: ALU 0, @22, KC0[], KC1[] -; EG-NEXT: TEX 7 @6 -; EG-NEXT: ALU 10, @23, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T2.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T7.XYZW, T0.X, 1 +; EG-NEXT: ALU 1, @68, KC0[], KC1[] +; EG-NEXT: TEX 0 @36 +; EG-NEXT: ALU 5, @70, KC0[], KC1[] +; EG-NEXT: TEX 0 @38 +; EG-NEXT: ALU 5, @76, KC0[], KC1[] +; EG-NEXT: TEX 0 @40 +; EG-NEXT: ALU 5, @82, KC0[], KC1[] +; EG-NEXT: TEX 0 @42 +; EG-NEXT: ALU 5, @88, KC0[], KC1[] +; EG-NEXT: TEX 0 @44 +; EG-NEXT: ALU 5, @94, KC0[], KC1[] +; EG-NEXT: TEX 0 @46 +; EG-NEXT: ALU 5, @100, KC0[], KC1[] +; EG-NEXT: TEX 0 @48 +; EG-NEXT: ALU 5, @106, KC0[], KC1[] +; EG-NEXT: TEX 0 @50 +; EG-NEXT: ALU 5, @112, KC0[], KC1[] +; EG-NEXT: TEX 0 @52 +; 
EG-NEXT: ALU 5, @118, KC0[], KC1[] +; EG-NEXT: TEX 0 @54 +; EG-NEXT: ALU 5, @124, KC0[], KC1[] +; EG-NEXT: TEX 0 @56 +; EG-NEXT: ALU 5, @130, KC0[], KC1[] +; EG-NEXT: TEX 0 @58 +; EG-NEXT: ALU 5, @136, KC0[], KC1[] +; EG-NEXT: TEX 0 @60 +; EG-NEXT: ALU 5, @142, KC0[], KC1[] +; EG-NEXT: TEX 0 @62 +; EG-NEXT: ALU 5, @148, KC0[], KC1[] +; EG-NEXT: TEX 0 @64 +; EG-NEXT: ALU 5, @154, KC0[], KC1[] +; EG-NEXT: TEX 0 @66 +; EG-NEXT: ALU 13, @160, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T12.XYZW, T14.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T11.XYZW, T13.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_16 T1.X, T0.X, 84, #3 -; EG-NEXT: VTX_READ_16 T2.X, T0.X, 86, #3 -; EG-NEXT: VTX_READ_16 T3.X, T0.X, 94, #3 -; EG-NEXT: VTX_READ_16 T4.X, T0.X, 78, #3 -; EG-NEXT: VTX_READ_16 T5.X, T0.X, 76, #3 -; EG-NEXT: VTX_READ_16 T6.X, T0.X, 92, #3 -; EG-NEXT: VTX_READ_16 T7.X, T0.X, 68, #3 -; EG-NEXT: VTX_READ_16 T0.X, T0.X, 70, #3 -; EG-NEXT: ALU clause starting at 22: -; EG-NEXT: MOV * T0.X, 0.0, -; EG-NEXT: ALU clause starting at 23: -; EG-NEXT: MOV T1.Y, T2.X, -; EG-NEXT: MOV * T7.Y, T0.X, -; EG-NEXT: MOV * T1.Z, T6.X, -; EG-NEXT: LSHR T0.X, KC0[2].Y, literal.x, -; EG-NEXT: MOV T7.Z, T5.X, +; EG-NEXT: Fetch clause starting at 36: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 98, #3 +; EG-NEXT: Fetch clause starting at 38: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 90, #3 +; EG-NEXT: Fetch clause starting at 40: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 82, #3 +; EG-NEXT: Fetch clause starting at 42: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 74, #3 +; EG-NEXT: Fetch clause starting at 44: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 96, #3 +; EG-NEXT: Fetch clause starting at 46: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 88, #3 +; EG-NEXT: Fetch clause starting at 48: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 80, #3 +; EG-NEXT: Fetch clause starting at 50: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 72, #3 +; EG-NEXT: Fetch clause starting at 52: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 94, #3 +; EG-NEXT: Fetch clause starting at 54: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 86, #3 +; EG-NEXT: Fetch clause starting at 56: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 78, #3 +; EG-NEXT: Fetch clause starting at 58: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 70, #3 +; EG-NEXT: Fetch clause starting at 60: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 92, #3 +; EG-NEXT: Fetch clause starting at 62: +; EG-NEXT: VTX_READ_16 T12.X, T11.X, 84, #3 +; EG-NEXT: Fetch clause starting at 64: +; EG-NEXT: VTX_READ_16 T13.X, T11.X, 76, #3 +; EG-NEXT: Fetch clause starting at 66: +; EG-NEXT: VTX_READ_16 T11.X, T11.X, 68, #3 +; EG-NEXT: ALU clause starting at 68: +; EG-NEXT: MOV * T0.Y, T3.X, +; EG-NEXT: MOV * T11.X, 0.0, +; EG-NEXT: ALU clause starting at 70: +; EG-NEXT: LSHL T0.W, T12.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV T3.X, PV.W, +; EG-NEXT: MOV * T0.Y, T5.X, +; EG-NEXT: ALU clause starting at 76: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T12.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T0.Y, T7.X, +; EG-NEXT: ALU clause starting at 82: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T12.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T7.X, PV.W, +; EG-NEXT: MOV * T0.Y, T9.X, +; EG-NEXT: ALU 
clause starting at 88: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T12.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.W, +; EG-NEXT: MOV * T0.Y, T3.X, +; EG-NEXT: ALU clause starting at 94: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T12.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T3.X, PV.W, +; EG-NEXT: MOV * T0.Y, T5.X, +; EG-NEXT: ALU clause starting at 100: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T12.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T0.Y, T7.X, +; EG-NEXT: ALU clause starting at 106: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T12.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T7.X, PV.W, +; EG-NEXT: MOV * T0.Y, T9.X, +; EG-NEXT: ALU clause starting at 112: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T12.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.W, +; EG-NEXT: MOV * T0.Y, T2.X, +; EG-NEXT: ALU clause starting at 118: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T12.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T2.X, PV.W, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: ALU clause starting at 124: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T12.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV * T0.Y, T6.X, +; EG-NEXT: ALU clause starting at 130: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T12.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T6.X, PV.W, +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: ALU clause starting at 136: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T1.W, T12.X, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV * T0.Y, T2.X, +; EG-NEXT: ALU clause starting at 142: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T12.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T12.Z, PV.W, PS, +; EG-NEXT: MOV T2.X, PV.Z, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: ALU clause starting at 148: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T12.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T12.X, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.X, +; EG-NEXT: MOV * T0.Y, T6.X, +; EG-NEXT: ALU clause starting at 154: +; EG-NEXT: AND_INT T0.W, T0.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, T13.X, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T11.Z, PV.W, PS, +; EG-NEXT: MOV T6.X, PV.Z, +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: ALU clause starting at 160: +; EG-NEXT: LSHR T13.X, KC0[2].Y, literal.x, ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, ; EG-NEXT: 2(2.802597e-45), 16(2.242078e-44) -; EG-NEXT: LSHR T2.X, PV.W, literal.x, -; EG-NEXT: MOV T7.W, T4.X, -; EG-NEXT: MOV * T1.W, T3.X, -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; 
EG-NEXT: LSHR T14.X, PV.W, literal.x, +; EG-NEXT: AND_INT T0.W, T0.Y, literal.y, +; EG-NEXT: AND_INT * T1.W, T11.X, literal.z, +; EG-NEXT: 2(2.802597e-45), -65536(nan) +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T11.X, PV.W, PS, +; EG-NEXT: MOV T8.X, PV.X, +; EG-NEXT: MOV * T12.W, T3.X, +; EG-NEXT: MOV T12.Y, T5.X, +; EG-NEXT: MOV T11.W, T7.X, BS:VEC_120/SCL_212 +; EG-NEXT: MOV * T11.Y, T9.X, ; ; CM-LABEL: v16i16_arg: ; CM: ; %bb.0: ; %entry -; CM-NEXT: ALU 0, @22, KC0[], KC1[] -; CM-NEXT: TEX 7 @6 -; CM-NEXT: ALU 11, @23, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T7, T2.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T1, T0.X +; CM-NEXT: ALU 1, @68, KC0[], KC1[] +; CM-NEXT: TEX 0 @36 +; CM-NEXT: ALU 5, @70, KC0[], KC1[] +; CM-NEXT: TEX 0 @38 +; CM-NEXT: ALU 5, @76, KC0[], KC1[] +; CM-NEXT: TEX 0 @40 +; CM-NEXT: ALU 5, @82, KC0[], KC1[] +; CM-NEXT: TEX 0 @42 +; CM-NEXT: ALU 5, @88, KC0[], KC1[] +; CM-NEXT: TEX 0 @44 +; CM-NEXT: ALU 5, @94, KC0[], KC1[] +; CM-NEXT: TEX 0 @46 +; CM-NEXT: ALU 5, @100, KC0[], KC1[] +; CM-NEXT: TEX 0 @48 +; CM-NEXT: ALU 5, @106, KC0[], KC1[] +; CM-NEXT: TEX 0 @50 +; CM-NEXT: ALU 5, @112, KC0[], KC1[] +; CM-NEXT: TEX 0 @52 +; CM-NEXT: ALU 5, @118, KC0[], KC1[] +; CM-NEXT: TEX 0 @54 +; CM-NEXT: ALU 5, @124, KC0[], KC1[] +; CM-NEXT: TEX 0 @56 +; CM-NEXT: ALU 5, @130, KC0[], KC1[] +; CM-NEXT: TEX 0 @58 +; CM-NEXT: ALU 5, @136, KC0[], KC1[] +; CM-NEXT: TEX 0 @60 +; CM-NEXT: ALU 5, @142, KC0[], KC1[] +; CM-NEXT: TEX 0 @62 +; CM-NEXT: ALU 5, @148, KC0[], KC1[] +; CM-NEXT: TEX 0 @64 +; CM-NEXT: ALU 5, @154, KC0[], KC1[] +; CM-NEXT: TEX 0 @66 +; CM-NEXT: ALU 14, @160, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T11, T14.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T12, T13.X ; CM-NEXT: CF_END -; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_16 T1.X, T0.X, 84, #3 -; CM-NEXT: VTX_READ_16 T2.X, T0.X, 86, #3 -; CM-NEXT: VTX_READ_16 T3.X, T0.X, 78, #3 -; CM-NEXT: VTX_READ_16 T4.X, T0.X, 94, #3 -; CM-NEXT: VTX_READ_16 T5.X, T0.X, 76, #3 -; CM-NEXT: VTX_READ_16 T6.X, T0.X, 92, #3 -; CM-NEXT: VTX_READ_16 T7.X, T0.X, 68, #3 -; CM-NEXT: VTX_READ_16 T0.X, T0.X, 70, #3 -; CM-NEXT: ALU clause starting at 22: -; CM-NEXT: MOV * T0.X, 0.0, -; CM-NEXT: ALU clause starting at 23: -; CM-NEXT: MOV * T1.Y, T2.X, -; CM-NEXT: MOV T7.Y, T0.X, -; CM-NEXT: MOV T1.Z, T6.X, BS:VEC_120/SCL_212 +; CM-NEXT: Fetch clause starting at 36: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 98, #3 +; CM-NEXT: Fetch clause starting at 38: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 90, #3 +; CM-NEXT: Fetch clause starting at 40: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 82, #3 +; CM-NEXT: Fetch clause starting at 42: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 74, #3 +; CM-NEXT: Fetch clause starting at 44: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 96, #3 +; CM-NEXT: Fetch clause starting at 46: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 88, #3 +; CM-NEXT: Fetch clause starting at 48: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 80, #3 +; CM-NEXT: Fetch clause starting at 50: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 72, #3 +; CM-NEXT: Fetch clause starting at 52: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 94, #3 +; CM-NEXT: Fetch clause starting at 54: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 86, #3 +; CM-NEXT: Fetch clause starting at 56: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 78, #3 +; CM-NEXT: Fetch clause starting at 58: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 70, #3 +; CM-NEXT: Fetch clause starting at 60: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 92, #3 +; CM-NEXT: Fetch clause starting at 
62: +; CM-NEXT: VTX_READ_16 T12.X, T11.X, 84, #3 +; CM-NEXT: Fetch clause starting at 64: +; CM-NEXT: VTX_READ_16 T13.X, T11.X, 76, #3 +; CM-NEXT: Fetch clause starting at 66: +; CM-NEXT: VTX_READ_16 T11.X, T11.X, 68, #3 +; CM-NEXT: ALU clause starting at 68: +; CM-NEXT: MOV * T0.Y, T3.X, +; CM-NEXT: MOV * T11.X, 0.0, +; CM-NEXT: ALU clause starting at 70: +; CM-NEXT: LSHL T0.Z, T12.X, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV T3.X, PV.W, +; CM-NEXT: MOV * T0.Y, T5.X, +; CM-NEXT: ALU clause starting at 76: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T12.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T5.X, PV.W, +; CM-NEXT: MOV * T0.Y, T7.X, +; CM-NEXT: ALU clause starting at 82: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T12.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T7.X, PV.W, +; CM-NEXT: MOV * T0.Y, T9.X, +; CM-NEXT: ALU clause starting at 88: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T12.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T9.X, PV.W, +; CM-NEXT: MOV * T0.Y, T3.X, +; CM-NEXT: ALU clause starting at 94: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T12.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T3.X, PV.W, +; CM-NEXT: MOV * T0.Y, T5.X, +; CM-NEXT: ALU clause starting at 100: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T12.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T5.X, PV.W, +; CM-NEXT: MOV * T0.Y, T7.X, +; CM-NEXT: ALU clause starting at 106: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T12.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T7.X, PV.W, +; CM-NEXT: MOV * T0.Y, T9.X, +; CM-NEXT: ALU clause starting at 112: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T12.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T9.X, PV.W, +; CM-NEXT: MOV * T0.Y, T2.X, +; CM-NEXT: ALU clause starting at 118: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T12.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T2.X, PV.W, +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: ALU clause starting at 124: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T12.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV * T0.Y, T6.X, +; CM-NEXT: ALU clause starting at 130: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T12.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T6.X, PV.W, +; CM-NEXT: MOV * T0.Y, T8.X, +; CM-NEXT: ALU clause starting at 136: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T12.X, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T8.X, PV.W, 
+; CM-NEXT: MOV * T0.Y, T2.X, +; CM-NEXT: ALU clause starting at 142: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T12.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T12.Z, PV.Z, PV.W, +; CM-NEXT: MOV T2.X, PV.Z, +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: ALU clause starting at 148: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T12.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T12.X, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.X, +; CM-NEXT: MOV * T0.Y, T6.X, +; CM-NEXT: ALU clause starting at 154: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, T13.X, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T11.Z, PV.Z, PV.W, +; CM-NEXT: MOV T6.X, PV.Z, +; CM-NEXT: MOV * T0.Y, T8.X, +; CM-NEXT: ALU clause starting at 160: ; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, ; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T0.X, PV.W, literal.x, -; CM-NEXT: MOV T7.Z, T5.X, -; CM-NEXT: MOV * T1.W, T4.X, BS:VEC_120/SCL_212 -; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) -; CM-NEXT: LSHR T2.X, KC0[2].Y, literal.x, -; CM-NEXT: MOV * T7.W, T3.X, +; CM-NEXT: LSHR * T13.X, PV.W, literal.x, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: LSHR T14.X, KC0[2].Y, literal.x, +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.y, +; CM-NEXT: AND_INT * T0.W, T11.X, literal.z, +; CM-NEXT: 2(2.802597e-45), -65536(nan) +; CM-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; CM-NEXT: OR_INT * T11.X, PV.Z, PV.W, +; CM-NEXT: MOV T8.X, PV.X, +; CM-NEXT: MOV * T12.W, T3.X, +; CM-NEXT: MOV T12.Y, T5.X, +; CM-NEXT: MOV * T11.W, T7.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV * T11.Y, T9.X, entry: store <16 x i16> %in, ptr addrspace(1) %out ret void diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll index 4309cfbe..c29c52c 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll @@ -11,6 +11,12 @@ declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel) declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel) declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 %scale_sel) +declare <16 x half> @llvm.amdgcn.cvt.scale.pk16.f16.fp6(<3 x i32> %src, i32 %scale, i32 %scale_sel) +declare <16 x bfloat> @llvm.amdgcn.cvt.scale.pk16.bf16.fp6(<3 x i32> %src, i32 %scale, i32 %scale_sel) +declare <16 x half> @llvm.amdgcn.cvt.scale.pk16.f16.bf6(<3 x i32> %src, i32 %scale, i32 %scale_sel) +declare <16 x bfloat> @llvm.amdgcn.cvt.scale.pk16.bf16.bf6(<3 x i32> %src, i32 %scale, i32 %scale_sel) +declare <16 x float> @llvm.amdgcn.cvt.scale.pk16.f32.fp6(<3 x i32> %src, i32 %scale, i32 %scale_sel) +declare <16 x float> @llvm.amdgcn.cvt.scale.pk16.f32.bf6(<3 x i32> %src, i32 %scale, i32 %scale_sel) define amdgpu_ps void @test_cvt_scale_pk8_f16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) { ; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_fp8_vv: @@ -162,3 +168,207 @@ define amdgpu_ps void @test_cvt_scale_pk8_f32_fp4_vv(i32 %src, i32 %scale, ptr a store <8 x float> %cvt, ptr addrspace(1) %out, align 32 ret void } + +define amdgpu_ps void @test_cvt_scale_pk16_f16_fp6_vv(<3 x i32> %src, i32 %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: 
test_cvt_scale_pk16_f16_fp6_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scale_pk16_f16_fp6 v[6:13], v[0:2], v3 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_cvt_scale_pk16_f16_fp6_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scale_pk16_f16_fp6 v[6:13], v[0:2], v3 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <16 x half> @llvm.amdgcn.cvt.scale.pk16.f16.fp6(<3 x i32> %src, i32 %scale, i32 0) + store <16 x half> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_f16_fp6_sl(<3 x i32> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_cvt_scale_pk16_f16_fp6_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v10, s0 :: v_dual_mov_b32 v11, s1 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v12, s2 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scale_pk16_f16_fp6 v[2:9], v[10:12], 0x64 scale_sel:1 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: global_store_b128 v[0:1], v[6:9], off offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v[0:1], v[2:5], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_cvt_scale_pk16_f16_fp6_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, s2 :: v_dual_mov_b32 v11, s1 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v10, s0 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scale_pk16_f16_fp6 v[2:9], v[10:12], 0x64 scale_sel:1 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: global_store_b128 v[0:1], v[2:5], off +; GFX1250-GISEL-NEXT: global_store_b128 v[0:1], v[6:9], off offset:16 +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <16 x half> @llvm.amdgcn.cvt.scale.pk16.f16.fp6(<3 x i32> %src, i32 100, i32 1) + store <16 x half> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_bf16_fp6_vv(<3 x i32> %src, i32 %scale, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_cvt_scale_pk16_bf16_fp6_vv: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_cvt_scale_pk16_bf16_fp6 v[6:13], v[0:2], v3 scale_sel:2 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <16 x bfloat> @llvm.amdgcn.cvt.scale.pk16.bf16.fp6(<3 x i32> %src, i32 %scale, i32 2) + store <16 x bfloat> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_bf16_fp6_sl(<3 x i32> inreg %src, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_cvt_scale_pk16_bf16_fp6_sl: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v10, s0 :: v_dual_mov_b32 v11, s1 +; GFX1250-NEXT: v_mov_b32_e32 v12, s2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_scale_pk16_bf16_fp6 v[2:9], v[10:12], 0x64 scale_sel:3 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[0:1], v[6:9], off offset:16 +; GFX1250-NEXT: global_store_b128 v[0:1], v[2:5], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <16 x bfloat> @llvm.amdgcn.cvt.scale.pk16.bf16.fp6(<3 x i32> %src, i32 100, i32 3) + store <16 x bfloat> %cvt, ptr addrspace(1) %out, align 8 
+ ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_f16_bf6_vv(<3 x i32> %src, i32 %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_cvt_scale_pk16_f16_bf6_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scale_pk16_f16_bf6 v[6:13], v[0:2], v3 scale_sel:4 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_cvt_scale_pk16_f16_bf6_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scale_pk16_f16_bf6 v[6:13], v[0:2], v3 scale_sel:4 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <16 x half> @llvm.amdgcn.cvt.scale.pk16.f16.bf6(<3 x i32> %src, i32 %scale, i32 4) + store <16 x half> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_f16_bf6_sl(<3 x i32> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_cvt_scale_pk16_f16_bf6_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v10, s0 :: v_dual_mov_b32 v11, s1 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v12, s2 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scale_pk16_f16_bf6 v[2:9], v[10:12], 0x64 scale_sel:5 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: global_store_b128 v[0:1], v[6:9], off offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v[0:1], v[2:5], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_cvt_scale_pk16_f16_bf6_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, s2 :: v_dual_mov_b32 v11, s1 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v10, s0 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scale_pk16_f16_bf6 v[2:9], v[10:12], 0x64 scale_sel:5 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: global_store_b128 v[0:1], v[2:5], off +; GFX1250-GISEL-NEXT: global_store_b128 v[0:1], v[6:9], off offset:16 +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <16 x half> @llvm.amdgcn.cvt.scale.pk16.f16.bf6(<3 x i32> %src, i32 100, i32 5) + store <16 x half> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_bf16_bf6_vv(<3 x i32> %src, i32 %scale, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_cvt_scale_pk16_bf16_bf6_vv: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_cvt_scale_pk16_bf16_bf6 v[6:13], v[0:2], v3 scale_sel:6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <16 x bfloat> @llvm.amdgcn.cvt.scale.pk16.bf16.bf6(<3 x i32> %src, i32 %scale, i32 6) + store <16 x bfloat> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_bf16_bf6_sl(<3 x i32> inreg %src, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_cvt_scale_pk16_bf16_bf6_sl: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v10, s0 :: v_dual_mov_b32 v11, s1 +; GFX1250-NEXT: v_mov_b32_e32 v12, s2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_scale_pk16_bf16_bf6 v[2:9], v[10:12], 0x64 scale_sel:7 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[0:1], v[6:9], off offset:16 +; GFX1250-NEXT: global_store_b128 v[0:1], v[2:5], off +; 
GFX1250-NEXT: s_endpgm + %cvt = tail call <16 x bfloat> @llvm.amdgcn.cvt.scale.pk16.bf16.bf6(<3 x i32> %src, i32 100, i32 7) + store <16 x bfloat> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_f32_fp6_vv(<3 x i32> %src, i32 %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_cvt_scale_pk16_f32_fp6_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scale_pk16_f32_fp6 v[6:21], v[0:2], v3 scale_sel:5 +; GFX1250-SDAG-NEXT: s_clause 0x3 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[18:21], off offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[14:17], off offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_cvt_scale_pk16_f32_fp6_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scale_pk16_f32_fp6 v[6:21], v[0:2], v3 scale_sel:5 +; GFX1250-GISEL-NEXT: s_clause 0x3 +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[14:17], off offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[18:21], off offset:48 +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <16 x float> @llvm.amdgcn.cvt.scale.pk16.f32.fp6(<3 x i32> %src, i32 %scale, i32 5) + store <16 x float> %cvt, ptr addrspace(1) %out, align 16 + ret void +} + +define amdgpu_ps void @test_cvt_scale_pk16_f32_bf6_vv(<3 x i32> %src, i32 %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_cvt_scale_pk16_f32_bf6_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scale_pk16_f32_bf6 v[6:21], v[0:2], v3 scale_sel:6 +; GFX1250-SDAG-NEXT: s_clause 0x3 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[18:21], off offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[14:17], off offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_cvt_scale_pk16_f32_bf6_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scale_pk16_f32_bf6 v[6:21], v[0:2], v3 scale_sel:6 +; GFX1250-GISEL-NEXT: s_clause 0x3 +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[10:13], off offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[14:17], off offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v[4:5], v[18:21], off offset:48 +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <16 x float> @llvm.amdgcn.cvt.scale.pk16.f32.bf6(<3 x i32> %src, i32 %scale, i32 6) + store <16 x float> %cvt, ptr addrspace(1) %out, align 16 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk16.gfx1250.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk16.gfx1250.ll new file mode 100644 index 0000000..dfb9089 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk16.gfx1250.ll @@ -0,0 +1,303 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-SDAG %s +; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-GISEL %s + +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f32(<16 x float> %src, float %scale) +declare <3 x i32> 
@llvm.amdgcn.cvt.scalef32.pk16.fp6.f32(<16 x float> %src, float %scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.bf16(<16 x bfloat> %src, float %scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f16(<16 x half> %src, float %scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.bf16(<16 x bfloat> %src, float %scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f16(<16 x half> %src, float %scale) + +define amdgpu_ps void @test_scalef32_pk16_bf6_f32_vv(<16 x float> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_bf6_f32_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v23, v18 :: v_dual_mov_b32 v22, v17 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_bf6_f32 v[18:20], v[0:15], v16 +; GFX1250-SDAG-NEXT: global_store_b96 v[22:23], v[18:20], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_bf6_f32_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v22, v17 :: v_dual_mov_b32 v23, v18 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_bf6_f32 v[18:20], v[0:15], v16 +; GFX1250-GISEL-NEXT: global_store_b96 v[22:23], v[18:20], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f32(<16 x float> %src, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_bf6_f32_sl(<16 x float> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_bf6_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v11, s9 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v13, s11 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v15, s13 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v16, s14 :: v_dual_mov_b32 v17, s15 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_bf6_f32 v[18:20], v[2:17], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[18:20], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_bf6_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_bf6_f32 v[18:20], v[2:17], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[18:20], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f32(<16 x float> %src, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_fp6_f32_vv(<16 x float> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_fp6_f32_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v23,
v18 :: v_dual_mov_b32 v22, v17 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_fp6_f32 v[18:20], v[0:15], v16 +; GFX1250-SDAG-NEXT: global_store_b96 v[22:23], v[18:20], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_fp6_f32_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v22, v17 :: v_dual_mov_b32 v23, v18 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_fp6_f32 v[18:20], v[0:15], v16 +; GFX1250-GISEL-NEXT: global_store_b96 v[22:23], v[18:20], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f32(<16 x float> %src, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_fp6_f32_sl(<16 x float> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_fp6_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v11, s9 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v13, s11 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v15, s13 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v16, s14 :: v_dual_mov_b32 v17, s15 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_fp6_f32 v[18:20], v[2:17], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[18:20], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_fp6_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_fp6_f32 v[18:20], v[2:17], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[18:20], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f32(<16 x float> %src, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_bf6_bf16_vv(<16 x bfloat> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_bf6_bf16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v14, v9 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_bf6_bf16 v[10:12], v[0:7], v8 +; GFX1250-SDAG-NEXT: global_store_b96 v[14:15], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_bf6_bf16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v14, v9 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_bf6_bf16 v[10:12], v[0:7], v8 +; GFX1250-GISEL-NEXT: global_store_b96 v[14:15], v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.bf16(<16 x bfloat> %src, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void 
@test_scalef32_pk16_bf6_bf16_sl(<16 x bfloat> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_bf6_bf16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_bf6_bf16 v[10:12], v[2:9], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_bf6_bf16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_bf6_bf16 v[10:12], v[2:9], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.bf16(<16 x bfloat> %src, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_bf6_f16_vv(<16 x half> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_bf6_f16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v14, v9 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_bf6_f16 v[10:12], v[0:7], v8 +; GFX1250-SDAG-NEXT: global_store_b96 v[14:15], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_bf6_f16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v14, v9 :: v_dual_mov_b32 v15, v10 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_bf6_f16 v[10:12], v[0:7], v8 +; GFX1250-GISEL-NEXT: global_store_b96 v[14:15], v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f16(<16 x half> %src, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_bf6_f16_sl(<16 x half> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_bf6_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_bf6_f16 v[10:12], v[2:9], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_bf6_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_bf6_f16 v[10:12], v[2:9], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[10:12], off +; 
GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.bf6.f16(<16 x half> %src, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_fp6_bf16_vv(<16 x bfloat> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_fp6_bf16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v14, v9 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_fp6_bf16 v[10:12], v[0:7], v8 +; GFX1250-SDAG-NEXT: global_store_b96 v[14:15], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_fp6_bf16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v14, v9 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_fp6_bf16 v[10:12], v[0:7], v8 +; GFX1250-GISEL-NEXT: global_store_b96 v[14:15], v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.bf16(<16 x bfloat> %src, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_fp6_bf16_sl(<16 x bfloat> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_fp6_bf16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_fp6_bf16 v[10:12], v[2:9], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_fp6_bf16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_fp6_bf16 v[10:12], v[2:9], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.bf16(<16 x bfloat> %src, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk16_fp6_f16_vv(<16 x half> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_fp6_f16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v14, v9 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_fp6_f16 v[10:12], v[0:7], v8 +; GFX1250-SDAG-NEXT: global_store_b96 v[14:15], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_fp6_f16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v14, v9 :: v_dual_mov_b32 v15, v10 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_fp6_f16 v[10:12], v[0:7], v8 +; GFX1250-GISEL-NEXT: global_store_b96 v[14:15], v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f16(<16 x half> %src, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void 
@test_scalef32_pk16_fp6_f16_sl(<16 x half> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk16_fp6_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk16_fp6_f16 v[10:12], v[2:9], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk16_fp6_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk16_fp6_f16 v[10:12], v[2:9], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.pk16.fp6.f16(<16 x half> %src, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk8.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk8.ll new file mode 100644 index 0000000..cd0b081 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk8.ll @@ -0,0 +1,403 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-SDAG %s +; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-GISEL %s + +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.bf16(<8 x bfloat> %src, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.bf16(<8 x bfloat> %src, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f16(<8 x half> %src, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f16(<8 x half> %src, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f32(<8 x float> %src, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f32(<8 x float> %src, float %scale) +declare i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f32(<8 x float> %src, float %scale) +declare i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f16(<8 x half> %src, float %scale) +declare i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.bf16(<8 x bfloat> %src, float %scale) + +define amdgpu_ps void @test_scalef32_pk8_fp8_bf16_vv(<8 x bfloat> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp8_bf16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp8_bf16 v[8:9], v[0:3], v4 +; GFX1250-SDAG-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp8_bf16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp8_bf16 v[8:9], v[0:3], v4 +; GFX1250-GISEL-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.bf16(<8 x 
bfloat> %src, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp8_bf16_sl(<8 x bfloat> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp8_bf16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp8_bf16 v[6:7], v[2:5], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp8_bf16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp8_bf16 v[6:7], v[2:5], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.bf16(<8 x bfloat> %src, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_bf8_bf16_vv(<8 x bfloat> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_bf8_bf16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_bf8_bf16 v[8:9], v[0:3], v4 +; GFX1250-SDAG-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_bf8_bf16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_bf8_bf16 v[8:9], v[0:3], v4 +; GFX1250-GISEL-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.bf16(<8 x bfloat> %src, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_bf8_bf16_sl(<8 x bfloat> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_bf8_bf16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_bf8_bf16 v[6:7], v[2:5], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_bf8_bf16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_bf8_bf16 v[6:7], v[2:5], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.bf16(<8 x bfloat> %src, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp8_f16_vv(<8 x half> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp8_f16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: 
v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp8_f16 v[8:9], v[0:3], v4 +; GFX1250-SDAG-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp8_f16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v9, v6 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp8_f16 v[6:7], v[0:3], v4 +; GFX1250-GISEL-NEXT: global_store_b64 v[8:9], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f16(<8 x half> %src, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp8_f16_sl(<8 x half> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp8_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp8_f16 v[6:7], v[2:5], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp8_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp8_f16 v[6:7], v[2:5], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f16(<8 x half> %src, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_bf8_f16_vv(<8 x half> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_bf8_f16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_bf8_f16 v[8:9], v[0:3], v4 +; GFX1250-SDAG-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_bf8_f16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v9, v6 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_bf8_f16 v[6:7], v[0:3], v4 +; GFX1250-GISEL-NEXT: global_store_b64 v[8:9], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f16(<8 x half> %src, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_bf8_f16_sl(<8 x half> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_bf8_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_bf8_f16 v[6:7], v[2:5], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_bf8_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_bf8_f16 v[6:7], v[2:5], 
0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f16(<8 x half> %src, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_bf8_f32_vv(<8 x float> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_bf8_f32_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v11, v10 :: v_dual_mov_b32 v10, v9 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_bf8_f32 v[12:13], v[0:7], v8 +; GFX1250-SDAG-NEXT: global_store_b64 v[10:11], v[12:13], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_bf8_f32_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v9 :: v_dual_mov_b32 v13, v10 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_bf8_f32 v[10:11], v[0:7], v8 +; GFX1250-GISEL-NEXT: global_store_b64 v[12:13], v[10:11], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f32(<8 x float> %src, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_bf8_f32_sl(<8 x float> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_bf8_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_bf8_f32 v[10:11], v[2:9], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[10:11], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_bf8_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_bf8_f32 v[10:11], v[2:9], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[10:11], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f32(<8 x float> %src, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp8_f32_vv(<8 x float> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp8_f32_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v11, v10 :: v_dual_mov_b32 v10, v9 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp8_f32 v[12:13], v[0:7], v8 +; GFX1250-SDAG-NEXT: global_store_b64 v[10:11], v[12:13], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp8_f32_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v9 :: v_dual_mov_b32 v13, v10 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp8_f32 v[10:11], v[0:7], v8 +; GFX1250-GISEL-NEXT: global_store_b64 v[12:13], v[10:11], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f32(<8 x float> %src, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp8_f32_sl(<8 x float> inreg 
%src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp8_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp8_f32 v[10:11], v[2:9], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[10:11], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp8_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp8_f32 v[10:11], v[2:9], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[10:11], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f32(<8 x float> %src, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp4_f32_vv(<8 x float> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp4_f32_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v11, v10 :: v_dual_mov_b32 v10, v9 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp4_f32 v9, v[0:7], v8 +; GFX1250-SDAG-NEXT: global_store_b32 v[10:11], v9, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp4_f32_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v9 :: v_dual_mov_b32 v13, v10 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp4_f32 v9, v[0:7], v8 +; GFX1250-GISEL-NEXT: global_store_b32 v[12:13], v9, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f32(<8 x float> %src, float %scale) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp4_f32_sl(<8 x float> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp4_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp4_f32 v10, v[2:9], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v10, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp4_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp4_f32 v10, v[2:9], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b32 v[0:1], v10, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f32(<8 x float> %src, float 100.0) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void 
@test_scalef32_pk8_fp4_f16_vv(<8 x half> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp4_f16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp4_f16 v5, v[0:3], v4 +; GFX1250-SDAG-NEXT: global_store_b32 v[6:7], v5, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp4_f16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v9, v6 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp4_f16 v5, v[0:3], v4 +; GFX1250-GISEL-NEXT: global_store_b32 v[8:9], v5, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f16(<8 x half> %src, float %scale) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp4_f16_sl(<8 x half> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp4_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp4_f16 v6, v[2:5], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v6, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp4_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp4_f16 v6, v[2:5], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b32 v[0:1], v6, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f16(<8 x half> %src, float 100.0) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp4_bf16_vv(<8 x bfloat> %src, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp4_bf16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp4_bf16 v5, v[0:3], v4 +; GFX1250-SDAG-NEXT: global_store_b32 v[6:7], v5, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp4_bf16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v6, v5 +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp4_bf16 v5, v[0:3], v4 +; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v5, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.bf16(<8 x bfloat> %src, float %scale) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_pk8_fp4_bf16_sl(<8 x bfloat> inreg %src, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_pk8_fp4_bf16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_pk8_fp4_bf16 v6, v[2:5], 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v6, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_pk8_fp4_bf16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, s2 
:: v_dual_mov_b32 v5, s3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_pk8_fp4_bf16 v6, v[2:5], 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b32 v[0:1], v6, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.bf16(<8 x bfloat> %src, float 100.0) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.sr.pk.gfx1250.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.sr.pk.gfx1250.ll new file mode 100644 index 0000000..d33acf6 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.sr.pk.gfx1250.ll @@ -0,0 +1,385 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-SDAG %s +; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250-GISEL %s + +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.bf16(<8 x bfloat> %src, i32 %sr, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.bf16(<8 x bfloat> %src, i32 %sr, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.f16(<8 x half> %src, i32 %sr, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.f16(<8 x half> %src, i32 %sr, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.f32(<8 x float> %src, i32 %sr, float %scale) +declare <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.f32(<8 x float> %src, i32 %sr, float %scale) +declare i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.f32(<8 x float> %src, i32 %sr, float %scale) +declare i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.f16(<8 x half> %src, i32 %sr, float %scale) +declare i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.bf16(<8 x bfloat> %src, i32 %sr, float %scale) + +define amdgpu_ps void @test_scalef32_sr_pk8_fp8_bf16_vv(<8 x bfloat> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp8_bf16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp8_bf16 v[8:9], v[0:3], v4, v5 +; GFX1250-SDAG-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp8_bf16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp8_bf16 v[8:9], v[0:3], v4, v5 +; GFX1250-GISEL-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.bf16(<8 x bfloat> %src, i32 %sr, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp8_bf16_sl(<8 x bfloat> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp8_bf16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp8_bf16 v[6:7], v[2:5], s4, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp8_bf16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp8_bf16 v[6:7], v[2:5], s4, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.bf16(<8 x bfloat> %src, i32 %sr, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_bf8_bf16_vv(<8 x bfloat> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_bf8_bf16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_bf8_bf16 v[8:9], v[0:3], v4, v5 +; GFX1250-SDAG-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_bf8_bf16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_bf8_bf16 v[8:9], v[0:3], v4, v5 +; GFX1250-GISEL-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.bf16(<8 x bfloat> %src, i32 %sr, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_bf8_bf16_sl(<8 x bfloat> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_bf8_bf16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_bf8_bf16 v[6:7], v[2:5], s4, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_bf8_bf16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_bf8_bf16 v[6:7], v[2:5], s4, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.bf16(<8 x bfloat> %src, i32 %sr, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp8_f16_vv(<8 x half> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp8_f16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp8_f16 v[8:9], v[0:3], v4, v5 +; GFX1250-SDAG-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp8_f16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp8_f16 v[8:9], v[0:3], v4, v5 +; GFX1250-GISEL-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.f16(<8 x half> %src, i32 %sr, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp8_f16_sl(<8 x half> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp8_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, 
s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp8_f16 v[6:7], v[2:5], s4, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp8_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp8_f16 v[6:7], v[2:5], s4, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.f16(<8 x half> %src, i32 %sr, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_bf8_f16_vv(<8 x half> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_bf8_f16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_bf8_f16 v[8:9], v[0:3], v4, v5 +; GFX1250-SDAG-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_bf8_f16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_bf8_f16 v[8:9], v[0:3], v4, v5 +; GFX1250-GISEL-NEXT: global_store_b64 v[6:7], v[8:9], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.f16(<8 x half> %src, i32 %sr, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_bf8_f16_sl(<8 x half> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_bf8_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_bf8_f16 v[6:7], v[2:5], s4, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_bf8_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_bf8_f16 v[6:7], v[2:5], s4, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[6:7], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.f16(<8 x half> %src, i32 %sr, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_bf8_f32_vv(<8 x float> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_bf8_f32_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_bf8_f32 v[12:13], v[0:7], v8, v9 +; GFX1250-SDAG-NEXT: global_store_b64 v[10:11], v[12:13], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_bf8_f32_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_bf8_f32 v[12:13], v[0:7], v8, v9 +; GFX1250-GISEL-NEXT: global_store_b64 v[10:11], v[12:13], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.f32(<8 x float> %src, 
i32 %sr, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_bf8_f32_sl(<8 x float> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_bf8_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_bf8_f32 v[10:11], v[2:9], s8, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[10:11], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_bf8_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_bf8_f32 v[10:11], v[2:9], s8, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[10:11], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.bf8.f32(<8 x float> %src, i32 %sr, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp8_f32_vv(<8 x float> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp8_f32_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp8_f32 v[12:13], v[0:7], v8, v9 +; GFX1250-SDAG-NEXT: global_store_b64 v[10:11], v[12:13], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp8_f32_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp8_f32 v[12:13], v[0:7], v8, v9 +; GFX1250-GISEL-NEXT: global_store_b64 v[10:11], v[12:13], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.f32(<8 x float> %src, i32 %sr, float %scale) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp8_f32_sl(<8 x float> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp8_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp8_f32 v[10:11], v[2:9], s8, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b64 v[0:1], v[10:11], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp8_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp8_f32 v[10:11], v[2:9], s8, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b64 v[0:1], v[10:11], off +; 
GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <2 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk8.fp8.f32(<8 x float> %src, i32 %sr, float 100.0) + store <2 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp4_f32_vv(<8 x float> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp4_f32_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp4_f32 v12, v[0:7], v8, v9 +; GFX1250-SDAG-NEXT: global_store_b32 v[10:11], v12, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp4_f32_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp4_f32 v12, v[0:7], v8, v9 +; GFX1250-GISEL-NEXT: global_store_b32 v[10:11], v12, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.f32(<8 x float> %src, i32 %sr, float %scale) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp4_f32_sl(<8 x float> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp4_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp4_f32 v10, v[2:9], s8, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v10, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp4_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp4_f32 v10, v[2:9], s8, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b32 v[0:1], v10, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.f32(<8 x float> %src, i32 %sr, float 100.0) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp4_f16_vv(<8 x half> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp4_f16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp4_f16 v8, v[0:3], v4, v5 +; GFX1250-SDAG-NEXT: global_store_b32 v[6:7], v8, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp4_f16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp4_f16 v8, v[0:3], v4, v5 +; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v8, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.f16(<8 x half> %src, i32 %sr, float %scale) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp4_f16_sl(<8 x half> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp4_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) 
+; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp4_f16 v6, v[2:5], s4, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v6, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp4_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp4_f16 v6, v[2:5], s4, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b32 v[0:1], v6, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.f16(<8 x half> %src, i32 %sr, float 100.0) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp4_bf16_vv(<8 x bfloat> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp4_bf16_vv: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp4_bf16 v8, v[0:3], v4, v5 +; GFX1250-SDAG-NEXT: global_store_b32 v[6:7], v8, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp4_bf16_vv: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp4_bf16 v8, v[0:3], v4, v5 +; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v8, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.bf16(<8 x bfloat> %src, i32 %sr, float %scale) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk8_fp4_bf16_sl(<8 x bfloat> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk8_fp4_bf16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk8_fp4_bf16 v6, v[2:5], s4, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v6, off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk8_fp4_bf16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk8_fp4_bf16 v6, v[2:5], s4, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b32 v[0:1], v6, off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call i32 @llvm.amdgcn.cvt.scalef32.sr.pk8.fp4.bf16(<8 x bfloat> %src, i32 %sr, float 100.0) + store i32 %cvt, ptr addrspace(1) %out, align 4 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.sr.pk16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.sr.pk16.ll new file mode 100644 index 0000000..c439518 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.sr.pk16.ll @@ -0,0 +1,232 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s +; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s + +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.bf16(<16 x bfloat> %src, i32 %sr, float %scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f16(<16 x half> %src, i32 %sr, float 
%scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f32(<16 x float> %src, i32 %sr, float %scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.bf16(<16 x bfloat> %src, i32 %sr, float %scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f16(<16 x half> %src, i32 %sr, float %scale) +declare <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f32(<16 x float> %src, i32 %sr, float %scale) + +define amdgpu_ps void @test_scalef32_sr_pk16_bf6_bf16_vv(<16 x bfloat> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_scalef32_sr_pk16_bf6_bf16_vv: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_cvt_scalef32_sr_pk16_bf6_bf16 v[12:14], v[0:7], v8, v9 +; GFX1250-NEXT: global_store_b96 v[10:11], v[12:14], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.bf16(<16 x bfloat> %src, i32 %sr, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_bf6_bf16_sl(<16 x bfloat> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_scalef32_sr_pk16_bf6_bf16_sl: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_scalef32_sr_pk16_bf6_bf16 v[10:12], v[2:9], s8, 0x42c80000 +; GFX1250-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.bf16(<16 x bfloat> %src, i32 %sr, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_bf6_f16_vv(<16 x half> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_scalef32_sr_pk16_bf6_f16_vv: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_cvt_scalef32_sr_pk16_bf6_f16 v[12:14], v[0:7], v8, v9 +; GFX1250-NEXT: global_store_b96 v[10:11], v[12:14], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f16(<16 x half> %src, i32 %sr, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_bf6_f16_sl(<16 x half> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk16_bf6_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk16_bf6_f16 v[10:12], v[2:9], s8, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk16_bf6_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk16_bf6_f16 v[10:12], v[2:9], s8, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], 
v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f16(<16 x half> %src, i32 %sr, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_fp6_bf16_vv(<16 x bfloat> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_scalef32_sr_pk16_fp6_bf16_vv: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_cvt_scalef32_sr_pk16_fp6_bf16 v[12:14], v[0:7], v8, v9 +; GFX1250-NEXT: global_store_b96 v[10:11], v[12:14], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.bf16(<16 x bfloat> %src, i32 %sr, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_fp6_bf16_sl(<16 x bfloat> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_scalef32_sr_pk16_fp6_bf16_sl: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_scalef32_sr_pk16_fp6_bf16 v[10:12], v[2:9], s8, 0x42c80000 +; GFX1250-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.bf16(<16 x bfloat> %src, i32 %sr, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_fp6_f16_vv(<16 x half> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_scalef32_sr_pk16_fp6_f16_vv: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_cvt_scalef32_sr_pk16_fp6_f16 v[12:14], v[0:7], v8, v9 +; GFX1250-NEXT: global_store_b96 v[10:11], v[12:14], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f16(<16 x half> %src, i32 %sr, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_fp6_f16_sl(<16 x half> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk16_fp6_f16_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk16_fp6_f16 v[10:12], v[2:9], s8, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk16_fp6_f16_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk16_fp6_f16 v[10:12], v[2:9], s8, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[10:12], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f16(<16 x half> %src, i32 %sr, float 100.0) + store <3 x i32> %cvt, ptr 
addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_bf6_f32_vv(<16 x float> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_scalef32_sr_pk16_bf6_f32_vv: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_cvt_scalef32_sr_pk16_bf6_f32 v[20:22], v[0:15], v16, v17 +; GFX1250-NEXT: global_store_b96 v[18:19], v[20:22], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f32(<16 x float> %src, i32 %sr, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_bf6_f32_sl(<16 x float> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk16_bf6_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v11, s9 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v13, s11 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v15, s13 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v16, s14 :: v_dual_mov_b32 v17, s15 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk16_bf6_f32 v[18:20], v[2:17], s16, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[18:20], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk16_bf6_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk16_bf6_f32 v[18:20], v[2:17], s16, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[18:20], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.bf6.f32(<16 x float> %src, i32 %sr, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_fp6_f32_vv(<16 x float> %src, i32 %sr, float %scale, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_scalef32_sr_pk16_fp6_f32_vv: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_cvt_scalef32_sr_pk16_fp6_f32 v[20:22], v[0:15], v16, v17 +; GFX1250-NEXT: global_store_b96 v[18:19], v[20:22], off +; GFX1250-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f32(<16 x float> %src, i32 %sr, float %scale) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} + +define amdgpu_ps void @test_scalef32_sr_pk16_fp6_f32_sl(<16 x float> inreg %src, i32 inreg %sr, ptr addrspace(1) %out) { +; GFX1250-SDAG-LABEL: test_scalef32_sr_pk16_fp6_f32_sl: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v9, s7 +; 
GFX1250-SDAG-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v11, s9 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v13, s11 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v15, s13 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v16, s14 :: v_dual_mov_b32 v17, s15 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cvt_scalef32_sr_pk16_fp6_f32 v[18:20], v[2:17], s16, 0x42c80000 +; GFX1250-SDAG-NEXT: global_store_b96 v[0:1], v[18:20], off +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: test_scalef32_sr_pk16_fp6_f32_sl: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[16:17], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[14:15], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[12:13], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[10:11], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[8:9], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cvt_scalef32_sr_pk16_fp6_f32 v[18:20], v[2:17], s16, 0x42c80000 +; GFX1250-GISEL-NEXT: global_store_b96 v[0:1], v[18:20], off +; GFX1250-GISEL-NEXT: s_endpgm + %cvt = tail call <3 x i32> @llvm.amdgcn.cvt.scalef32.sr.pk16.fp6.f32(<16 x float> %src, i32 %sr, float 100.0) + store <3 x i32> %cvt, ptr addrspace(1) %out, align 8 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.perm.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.perm.pk.ll new file mode 100644 index 0000000..d2f96c4 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.perm.pk.ll @@ -0,0 +1,66 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s + +declare <2 x i32> @llvm.amdgcn.perm.pk16.b4.u4(i32, i32, <2 x i32>) +declare <3 x i32> @llvm.amdgcn.perm.pk16.b6.u4(i32, i64, <2 x i32>) +declare <4 x i32> @llvm.amdgcn.perm.pk16.b8.u4(i64, i64, <2 x i32>) + +define void @test_perm_pk16_b4_u4(i32 %a, i32 %b, <2 x i32> %c, ptr %out) { +; GFX1250-LABEL: test_perm_pk16_b4_u4: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_perm_pk16_b4_u4 v[0:1], v0, v1, v[2:3] +; GFX1250-NEXT: flat_store_b64 v[4:5], v[0:1] scope:SCOPE_SE +; GFX1250-NEXT: s_wait_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <2 x i32> @llvm.amdgcn.perm.pk16.b4.u4(i32 %a, i32 %b, <2 x i32> %c) + store <2 x i32> %ret, ptr %out, align 8 + ret void +} + +define void @test_perm_pk16_b6_u4(i32 %a, i64 %b, <2 x i32> %c, ptr %out) { +; GFX1250-SDAG-LABEL: test_perm_pk16_b6_u4: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v9, v4 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v3, v2 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v6, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_perm_pk16_b6_u4 v[0:2], v0, v[2:3], v[8:9] +; GFX1250-SDAG-NEXT: flat_store_b96 v[6:7], v[0:2] scope:SCOPE_SE +; GFX1250-SDAG-NEXT: s_wait_dscnt 0x0 +; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31] +; +; GFX1250-GISEL-LABEL: test_perm_pk16_b6_u4: 
+; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, v3 :: v_dual_mov_b32 v3, v4 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v5 :: v_dual_mov_b32 v5, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_perm_pk16_b6_u4 v[0:2], v0, v[8:9], v[2:3] +; GFX1250-GISEL-NEXT: flat_store_b96 v[4:5], v[0:2] scope:SCOPE_SE +; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 +; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <3 x i32> @llvm.amdgcn.perm.pk16.b6.u4(i32 %a, i64 %b, <2 x i32> %c) + store <3 x i32> %ret, ptr %out, align 16 + ret void +} + +define void @test_perm_pk16_b8_u4(i64 %a, i64 %b, <2 x i32> %c, ptr %out) { +; GFX1250-LABEL: test_perm_pk16_b8_u4: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_perm_pk16_b8_u4 v[0:3], v[0:1], v[2:3], v[4:5] +; GFX1250-NEXT: flat_store_b128 v[6:7], v[0:3] scope:SCOPE_SE +; GFX1250-NEXT: s_wait_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %ret = tail call <4 x i32> @llvm.amdgcn.perm.pk16.b8.u4(i64 %a, i64 %b, <2 x i32> %c) + store <4 x i32> %ret, ptr %out, align 16 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll index 9149ed5..1bf865c 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll @@ -854,6 +854,1202 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v40, v41 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[42:43], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[42:43], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v40, v41 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[42:43], v[32:35], off +; GISEL-NEXT: global_store_b128 v[42:43], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 2, <16 x i32> %B, i16 0, <8 x float> %C, i32 1, i32 0, i32 %scale_src0, i32 1, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_ss(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 inreg %scale_src0, i32 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_ss: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], s0, s1 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_b_scale:MATRIX_SCALE_ROW1 
matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_ss: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], s0, s1 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 2, <16 x i32> %B, i16 0, <8 x float> %C, i32 2, i32 1, i32 %scale_src0, i32 1, i32 2, i32 %scale_src1, i1 true, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_si_scale(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 inreg %scale_src0, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_si_scale: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: s_movk_i32 s1, 0x64 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], s0, s1 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_si_scale: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_mov_b32_e32 v42, 0x64 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], s0, v42 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 2, <16 x i32> %B, i16 0, <8 x float> %C, i32 3, i32 2, i32 %scale_src0, i32 0, i32 1, i32 100, i1 false, i1 true) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp8_bf8(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp8_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v40, v41 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[42:43], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[42:43], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp8_bf8: +; GISEL: ; %bb.0: ; %bb +; 
GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v40, v41 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[42:43], v[32:35], off +; GISEL-NEXT: global_store_b128 v[42:43], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp8_fp6(<16 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp8_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v36, v37 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[38:39], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp8_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v36, v37 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[38:39], v[28:31], off +; GISEL-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v12i32(i32 0, <16 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp8_bf6(<16 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp8_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v36, v37 matrix_b_fmt:MATRIX_FMT_BF6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[38:39], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp8_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v36, v37 matrix_b_fmt:MATRIX_FMT_BF6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[38:39], v[28:31], off +; GISEL-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v12i32(i32 0, <16 x i32> %A, i32 3, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp8_fp4(<16 x i32> %A, <8 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp8_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:15], v[16:23], v[24:31], v32, v33 matrix_b_fmt:MATRIX_FMT_FP4 +; 
GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[34:35], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp8_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:15], v[16:23], v[24:31], v32, v33 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[34:35], v[24:27], off +; GISEL-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v8i32(i32 0, <16 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp8(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v40, v41 matrix_a_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[42:43], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[42:43], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v40, v41 matrix_a_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[42:43], v[32:35], off +; GISEL-NEXT: global_store_b128 v[42:43], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf8_bf8(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v40, v41 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[42:43], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[42:43], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v40, v41 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[42:43], v[32:35], off +; GISEL-NEXT: global_store_b128 v[42:43], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void 
@test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp6(<16 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[38:39], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[38:39], v[28:31], off +; GISEL-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v12i32(i32 1, <16 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf8_bf6(<16 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_BF6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[38:39], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_BF6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[38:39], v[28:31], off +; GISEL-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v12i32(i32 1, <16 x i32> %A, i32 3, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp4(<16 x i32> %A, <8 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:15], v[16:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[34:35], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf8_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:15], v[16:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 
v[34:35], v[24:27], off +; GISEL-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v8i32(i32 1, <16 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp8(<12 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[38:39], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[38:39], v[28:31], off +; GISEL-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v16i32(i32 2, <12 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp6_bf8(<12 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[38:39], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[38:39], v[28:31], off +; GISEL-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v16i32(i32 2, <12 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp6(<12 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; 
GFX1250-NEXT: global_store_b128 v[34:35], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[34:35], v[24:27], off +; GISEL-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v12i32(i32 2, <12 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp6_bf6(<12 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[34:35], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[34:35], v[24:27], off +; GISEL-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v12i32(i32 2, <12 x i32> %A, i32 4, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp4(<12 x i32> %A, <8 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[20:27], v[0:11], v[12:19], v[20:27], v28, v29 matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[30:31], v[24:27], off offset:16 +; GFX1250-NEXT: global_store_b128 v[30:31], v[20:23], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp6_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[20:27], v[0:11], v[12:19], v[20:27], v28, v29 matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[30:31], v[20:23], off +; GISEL-NEXT: global_store_b128 v[30:31], v[24:27], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v8i32(i32 2, <12 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp8(<12 x i32> %A, 
<16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_BF6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[38:39], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_BF6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[38:39], v[28:31], off +; GISEL-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v16i32(i32 3, <12 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf6_bf8(<12 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[38:39], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v36, v37 matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[38:39], v[28:31], off +; GISEL-NEXT: global_store_b128 v[38:39], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v16i32(i32 3, <12 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp6(<12 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[34:35], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[34:35], v[24:27], off +; GISEL-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + 
%res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v12i32(i32 3, <12 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf6_bf6(<12 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[34:35], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[34:35], v[24:27], off +; GISEL-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v12i32(i32 3, <12 x i32> %A, i32 4, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp4(<12 x i32> %A, <8 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[20:27], v[0:11], v[12:19], v[20:27], v28, v29 matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[30:31], v[24:27], off offset:16 +; GFX1250-NEXT: global_store_b128 v[30:31], v[20:23], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_bf6_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[20:27], v[0:11], v[12:19], v[20:27], v28, v29 matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[30:31], v[20:23], off +; GISEL-NEXT: global_store_b128 v[30:31], v[24:27], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v12i32.v8i32(i32 3, <12 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp8(<8 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:7], v[8:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[34:35], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: 
test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:7], v[8:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[34:35], v[24:27], off +; GISEL-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v8i32.v16i32(i32 4, <8 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp4_bf8(<8 x i32> %A, <16 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp4_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:7], v[8:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[34:35], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp4_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[24:31], v[0:7], v[8:23], v[24:31], v32, v33 matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[34:35], v[24:27], off +; GISEL-NEXT: global_store_b128 v[34:35], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v8i32.v16i32(i32 4, <8 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp6(<8 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[20:27], v[0:7], v[8:19], v[20:27], v28, v29 matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[30:31], v[24:27], off offset:16 +; GFX1250-NEXT: global_store_b128 v[30:31], v[20:23], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[20:27], v[0:7], v[8:19], v[20:27], v28, v29 matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[30:31], v[20:23], off +; GISEL-NEXT: global_store_b128 v[30:31], v[24:27], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v8i32.v12i32(i32 4, <8 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp4_bf6(<8 x i32> %A, <12 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: 
test_wmma_scale_f32_16x16x128_f8f6f4_fp4_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[20:27], v[0:7], v[8:19], v[20:27], v28, v29 matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[30:31], v[24:27], off offset:16 +; GFX1250-NEXT: global_store_b128 v[30:31], v[20:23], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp4_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[20:27], v[0:7], v[8:19], v[20:27], v28, v29 matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[30:31], v[20:23], off +; GISEL-NEXT: global_store_b128 v[30:31], v[24:27], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v8i32.v12i32(i32 4, <8 x i32> %A, i32 4, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp4(<8 x i32> %A, <8 x i32> %B, <8 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[16:23], v[0:7], v[8:15], v[16:23], v24, v25 matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[26:27], v[20:23], off offset:16 +; GFX1250-NEXT: global_store_b128 v[26:27], v[16:19], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_fp4_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[16:23], v[0:7], v[8:15], v[16:23], v24, v25 matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[26:27], v[16:19], off +; GISEL-NEXT: global_store_b128 v[26:27], v[20:23], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v8i32.v8i32(i32 4, <8 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i32 %scale_src0, i32 0, i32 0, i32 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v[40:41], v[42:43] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[44:45], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[44:45], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v[40:41], v[42:43] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[44:45], v[32:35], off +; GISEL-NEXT: 
global_store_b128 v[44:45], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 2, <16 x i32> %B, i16 0, <8 x float> %C, i32 1, i32 0, i64 %scale_src0, i32 1, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_ss(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 inreg %scale_src0, i64 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_ss: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], s[0:1], s[2:3] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_ss: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], s[0:1], s[2:3] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 2, <16 x i32> %B, i16 0, <8 x float> %C, i32 2, i32 1, i64 %scale_src0, i32 1, i32 2, i64 %scale_src1, i1 true, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_si_scale(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 inreg %scale_src0, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_si_scale: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: s_mov_b64 s[2:3], 0x64 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], s[0:1], s[2:3] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_si_scale: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_mov_b64_e32 v[42:43], 0x64 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], s[0:1], v[42:43] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> 
@llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 2, <16 x i32> %B, i16 0, <8 x float> %C, i32 3, i32 2, i64 %scale_src0, i32 0, i32 1, i64 100, i1 false, i1 true) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_bf8(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v[40:41], v[42:43] matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[44:45], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[44:45], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v[40:41], v[42:43] matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[44:45], v[32:35], off +; GISEL-NEXT: global_store_b128 v[44:45], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_fp6(<16 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v[36:37], v[38:39] matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v[36:37], v[38:39] matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v12i32(i32 0, <16 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_bf6(<16 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v[36:37], v[38:39] matrix_b_fmt:MATRIX_FMT_BF6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_bf6: +; 
GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v[36:37], v[38:39] matrix_b_fmt:MATRIX_FMT_BF6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v12i32(i32 0, <16 x i32> %A, i32 3, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_fp4(<16 x i32> %A, <8 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:15], v[16:23], v[24:31], v[32:33], v[34:35] matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[36:37], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp8_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:15], v[16:23], v[24:31], v[32:33], v[34:35] matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[36:37], v[24:27], off +; GISEL-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v8i32(i32 0, <16 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp8(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v[40:41], v[42:43] matrix_a_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[44:45], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[44:45], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v[40:41], v[42:43] matrix_a_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[44:45], v[32:35], off +; GISEL-NEXT: global_store_b128 v[44:45], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_bf8(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: 
v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v[40:41], v[42:43] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[44:45], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[44:45], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], v[40:41], v[42:43] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[44:45], v[32:35], off +; GISEL-NEXT: global_store_b128 v[44:45], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 1, <16 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp6(<16 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v12i32(i32 1, <16 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_bf6(<16 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_BF6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:15], v[16:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_BF6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> 
@llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v12i32(i32 1, <16 x i32> %A, i32 3, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp4(<16 x i32> %A, <8 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:15], v[16:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[36:37], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf8_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:15], v[16:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_BF8 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[36:37], v[24:27], off +; GISEL-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v8i32(i32 1, <16 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp8(<12 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v16i32(i32 2, <12 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_bf8(<12 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off +; 
GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v16i32(i32 2, <12 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp6(<12 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[36:37], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[36:37], v[24:27], off +; GISEL-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v12i32(i32 2, <12 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_bf6(<12 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[36:37], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[36:37], v[24:27], off +; GISEL-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v12i32(i32 2, <12 x i32> %A, i32 4, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void 
@test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp4(<12 x i32> %A, <8 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[20:27], v[0:11], v[12:19], v[20:27], v[28:29], v[30:31] matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[24:27], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[20:23], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp6_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[20:27], v[0:11], v[12:19], v[20:27], v[28:29], v[30:31] matrix_a_fmt:MATRIX_FMT_FP6 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[20:23], off +; GISEL-NEXT: global_store_b128 v[32:33], v[24:27], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v8i32(i32 2, <12 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp8(<12 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_BF6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_BF6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v16i32(i32 3, <12 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_bf8(<12 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[28:35], v[0:11], v[12:27], v[28:35], v[36:37], v[38:39] matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; 
GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v16i32(i32 3, <12 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp6(<12 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[36:37], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[36:37], v[24:27], off +; GISEL-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v12i32(i32 3, <12 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_bf6(<12 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[36:37], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:11], v[12:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[36:37], v[24:27], off +; GISEL-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v12i32(i32 3, <12 x i32> %A, i32 4, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp4(<12 x i32> %A, <8 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[20:27], v[0:11], v[12:19], v[20:27], v[28:29], 
v[30:31] matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[24:27], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[20:23], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_bf6_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[20:27], v[0:11], v[12:19], v[20:27], v[28:29], v[30:31] matrix_a_fmt:MATRIX_FMT_BF6 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[20:23], off +; GISEL-NEXT: global_store_b128 v[32:33], v[24:27], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v12i32.v8i32(i32 3, <12 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp8(<8 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:7], v[8:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[36:37], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:7], v[8:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[36:37], v[24:27], off +; GISEL-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v8i32.v16i32(i32 4, <8 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_bf8(<8 x i32> %A, <16 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_bf8: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:7], v[8:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_BF8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[36:37], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_bf8: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[24:31], v[0:7], v[8:23], v[24:31], v[32:33], v[34:35] matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_BF8 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[36:37], v[24:27], off +; GISEL-NEXT: global_store_b128 v[36:37], v[28:31], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v8i32.v16i32(i32 4, <8 x i32> %A, i32 1, <16 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 
%scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp6(<8 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[20:27], v[0:7], v[8:19], v[20:27], v[28:29], v[30:31] matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP6 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[24:27], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[20:23], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[20:27], v[0:7], v[8:19], v[20:27], v[28:29], v[30:31] matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP6 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[20:23], off +; GISEL-NEXT: global_store_b128 v[32:33], v[24:27], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v8i32.v12i32(i32 4, <8 x i32> %A, i32 2, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_bf6(<8 x i32> %A, <12 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_bf6: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[20:27], v[0:7], v[8:19], v[20:27], v[28:29], v[30:31] matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[24:27], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[20:23], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_bf6: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[20:27], v[0:7], v[8:19], v[20:27], v[28:29], v[30:31] matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[20:23], off +; GISEL-NEXT: global_store_b128 v[32:33], v[24:27], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v8i32.v12i32(i32 4, <8 x i32> %A, i32 4, <12 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp4(<8 x i32> %A, <8 x i32> %B, <8 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[16:23], v[0:7], v[8:15], v[16:23], v[24:25], v[26:27] matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[28:29], v[20:23], off offset:16 +; GFX1250-NEXT: global_store_b128 v[28:29], v[16:19], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_fp4_fp4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: 
v_wmma_scale16_f32_16x16x128_f8f6f4 v[16:23], v[0:7], v[8:15], v[16:23], v[24:25], v[26:27] matrix_a_fmt:MATRIX_FMT_FP4 matrix_b_fmt:MATRIX_FMT_FP4 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[28:29], v[16:19], off +; GISEL-NEXT: global_store_b128 v[28:29], v[20:23], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v8i32.v8i32(i32 4, <8 x i32> %A, i32 4, <8 x i32> %B, i16 0, <8 x float> %C, i32 0, i32 0, i64 %scale_src0, i32 0, i32 0, i64 %scale_src1, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + define amdgpu_ps void @test_wmma_f16_16x16x128_fp8_fp8(<16 x i32> %A, <16 x i32> %B, <8 x half> %C, ptr addrspace(1) %out) { ; GFX1250-LABEL: test_wmma_f16_16x16x128_fp8_fp8: ; GFX1250: ; %bb.0: ; %bb @@ -1040,6 +2236,170 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v40, v41 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[42:43], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[42:43], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[42:43], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[42:43], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v40, v41 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[42:43], v[24:27], off +; GISEL-NEXT: global_store_b128 v[42:43], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[42:43], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[42:43], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 1, i32 0, i32 %scale_src0, i32 1, i32 0, i32 %scale_src1, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_ss(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 inreg %scale_src0, i32 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_ss: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_ss: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; 
GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 2, i32 1, i32 %scale_src0, i32 1, i32 2, i32 %scale_src1, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_si_scale(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 inreg %scale_src0, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_si_scale: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: s_movk_i32 s1, 0x64 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_si_scale: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_mov_b32_e32 v42, 0x64 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, v42 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 3, i32 2, i32 %scale_src0, i32 0, i32 1, i32 100, i1 false, i1 true) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v[40:41], v[42:43] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[44:45], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[44:45], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[44:45], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[44:45], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v[40:41], v[42:43] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[44:45], v[24:27], off +; 
GISEL-NEXT: global_store_b128 v[44:45], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[44:45], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[44:45], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 1, i32 0, i64 %scale_src0, i32 1, i32 0, i64 %scale_src1, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_ss(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 inreg %scale_src0, i64 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_ss: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_ss: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 2, i32 1, i64 %scale_src0, i32 1, i32 2, i64 %scale_src1, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_si_scale(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 inreg %scale_src0, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_si_scale: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: s_mov_b64 s[2:3], 0x64 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_si_scale: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_mov_b64_e32 v[42:43], 0x64 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], v[42:43] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 
matrix_b_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 3, i32 2, i64 %scale_src0, i32 0, i32 1, i64 100, i1 false, i1 true) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + define amdgpu_ps void @test_swmmac_f32_16x16x64_bf16(<16 x bfloat> %A, <32 x bfloat> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) { ; GFX1250-LABEL: test_swmmac_f32_16x16x64_bf16: ; GFX1250: ; %bb.0: ; %bb @@ -1366,6 +2726,8 @@ declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x64.iu8.v8i32.v8i32(i1 immarg, <8 x declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1, <16 x half>, i1, <16 x half>, i16, <8 x float>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x32.f16.v8f16.v16f16(i1, <16 x half>, i1, <16 x half>, i16, <8 x half>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>, i32, i32, i64, i32, i32, i64, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.fp8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.bf8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.bf8.fp8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) @@ -1375,6 +2737,8 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>) +declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, i32, i64, i1, i1) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x64.bf16.v8f32.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x float>, i16, i1, i1) declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x64.bf16.v8bf16.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x bfloat>, i16, i1, i1) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll index 12ea314..48303c0 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll @@ -1446,6 +1446,220 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4(<16 x i32> %A, <16 x i32> 
%B, i32 inreg %scale_src0, i32 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], 1.0, s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[34:37], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], 1.0, s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[34:37], off +; GISEL-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i32 %scale_src0, i32 1, i32 0, i32 %scale_src1, i1 true, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_non_splat(<16 x i32> %A, <16 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_non_splat: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_dual_mov_b32 v34, 1.0 :: v_dual_mov_b32 v35, 2.0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v36, v34 :: v_dual_mov_b32 v37, v34 +; GFX1250-NEXT: v_dual_mov_b32 v38, v34 :: v_dual_mov_b32 v39, v34 +; GFX1250-NEXT: v_dual_mov_b32 v40, v34 :: v_dual_mov_b32 v41, v34 +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], v[34:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[34:37], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_non_splat: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 1.0 +; GISEL-NEXT: s_mov_b32 s1, 2.0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[0:1] +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], v[34:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[34:37], off +; GISEL-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> <float 1.0, float 2.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i32 1, i32 1, i32 0, i32 2, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + 
+define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_non_inlineable(<16 x i32> %A, <16 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_non_inlineable: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_mov_b32_e32 v34, 0x40400000 +; GFX1250-NEXT: s_movk_i32 s0, 0x65 +; GFX1250-NEXT: s_movk_i32 s1, 0x64 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v35, v34 :: v_dual_mov_b32 v36, v34 +; GFX1250-NEXT: v_dual_mov_b32 v37, v34 :: v_dual_mov_b32 v38, v34 +; GFX1250-NEXT: v_dual_mov_b32 v39, v34 :: v_dual_mov_b32 v40, v34 +; GFX1250-NEXT: v_mov_b32_e32 v41, v34 +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], v[34:41], s1, s0 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[34:37], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_non_inlineable: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 0x40400000 +; GISEL-NEXT: v_mov_b32_e32 v42, 0x64 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s1, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[0:1] +; GISEL-NEXT: v_mov_b32_e32 v43, 0x65 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], v[34:41], v42, v43 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[34:37], off +; GISEL-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>, i32 1, i32 0, i32 100, i32 1, i32 0, i32 101, i1 true, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4(<16 x i32> %A, <16 x i32> %B, i64 inreg %scale_src0, i64 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], 1.0, s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[34:37], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], 1.0, s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[34:37], off +; GISEL-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> 
@llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i64 %scale_src0, i32 1, i32 0, i64 %scale_src1, i1 true, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_non_splat(<16 x i32> %A, <16 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_non_splat: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_dual_mov_b32 v34, 1.0 :: v_dual_mov_b32 v35, 2.0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v36, v34 :: v_dual_mov_b32 v37, v34 +; GFX1250-NEXT: v_dual_mov_b32 v38, v34 :: v_dual_mov_b32 v39, v34 +; GFX1250-NEXT: v_dual_mov_b32 v40, v34 :: v_dual_mov_b32 v41, v34 +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], v[34:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[34:37], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_non_splat: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 1.0 +; GISEL-NEXT: s_mov_b32 s1, 2.0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[0:1] +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], v[34:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[34:37], off +; GISEL-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> <float 1.0, float 2.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i64 1, i32 1, i32 0, i64 2, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_non_inlineable(<16 x i32> %A, <16 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_non_inlineable: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_mov_b32_e32 v34, 0x40400000 +; GFX1250-NEXT: s_mov_b64 s[0:1], 0x65 +; GFX1250-NEXT: s_mov_b64 s[2:3], 0x64 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v35, v34 :: v_dual_mov_b32 v36, v34 +; GFX1250-NEXT: v_dual_mov_b32 v37, v34 :: v_dual_mov_b32 v38, v34 +; GFX1250-NEXT: v_dual_mov_b32 v39, v34 :: v_dual_mov_b32 v40, v34 +; GFX1250-NEXT: v_mov_b32_e32 v41, v34 +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], v[34:41], s[2:3], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[32:33], 
v[38:41], off offset:16 +; GFX1250-NEXT: global_store_b128 v[32:33], v[34:37], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_non_inlineable: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 0x40400000 +; GISEL-NEXT: v_mov_b64_e32 v[42:43], 0x64 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s1, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[44:45], 0x65 +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[0:1] +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[34:41], v[0:15], v[16:31], v[34:41], v[42:43], v[44:45] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[32:33], v[34:37], off +; GISEL-NEXT: global_store_b128 v[32:33], v[38:41], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 0, <8 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>, i32 1, i32 0, i64 100, i32 1, i32 0, i64 101, i1 true, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + define amdgpu_ps void @test_wmma_f16_16x16x128_fp8_fp8(<16 x i32> %A, <16 x i32> %B, ptr addrspace(1) %out) { ; GFX1250-LABEL: test_wmma_f16_16x16x128_fp8_fp8: ; GFX1250: ; %bb.0: ; %bb @@ -2316,6 +2530,312 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, i32 inreg %scale_src0, i32 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i32 %scale_src0, i32 1, i32 0, i32 %scale_src1, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void 
@test_wmma_scale_f32_32x16x128_f4_non_splat(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_non_splat: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_dual_mov_b32 v26, 1.0 :: v_dual_mov_b32 v27, 2.0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v28, v26 :: v_dual_mov_b32 v29, v26 +; GFX1250-NEXT: v_dual_mov_b32 v30, v26 :: v_dual_mov_b32 v31, v26 +; GFX1250-NEXT: v_dual_mov_b32 v32, v26 :: v_dual_mov_b32 v33, v26 +; GFX1250-NEXT: v_dual_mov_b32 v34, v26 :: v_dual_mov_b32 v35, v26 +; GFX1250-NEXT: v_dual_mov_b32 v36, v26 :: v_dual_mov_b32 v37, v26 +; GFX1250-NEXT: v_dual_mov_b32 v38, v26 :: v_dual_mov_b32 v39, v26 +; GFX1250-NEXT: v_dual_mov_b32 v40, v26 :: v_dual_mov_b32 v41, v26 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_non_splat: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 1.0 +; GISEL-NEXT: s_mov_b32 s1, 2.0 +; GISEL-NEXT: s_mov_b32 s14, s0 +; GISEL-NEXT: s_mov_b32 s15, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s8, s0 +; GISEL-NEXT: s_mov_b32 s9, s0 +; GISEL-NEXT: s_mov_b32 s10, s0 +; GISEL-NEXT: s_mov_b32 s11, s0 +; GISEL-NEXT: s_mov_b32 s12, s0 +; GISEL-NEXT: s_mov_b32 s13, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9] +; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1] +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 2.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i32 1, i32 1, i32 0, i32 2, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_non_inlineable(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_non_inlineable: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_mov_b32_e32 v26, 0x40400000 +; GFX1250-NEXT: s_movk_i32 s0, 0x65 
+; GFX1250-NEXT: s_movk_i32 s1, 0x64 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v27, v26 :: v_dual_mov_b32 v28, v26 +; GFX1250-NEXT: v_dual_mov_b32 v29, v26 :: v_dual_mov_b32 v30, v26 +; GFX1250-NEXT: v_dual_mov_b32 v31, v26 :: v_dual_mov_b32 v32, v26 +; GFX1250-NEXT: v_dual_mov_b32 v33, v26 :: v_dual_mov_b32 v34, v26 +; GFX1250-NEXT: v_dual_mov_b32 v35, v26 :: v_dual_mov_b32 v36, v26 +; GFX1250-NEXT: v_dual_mov_b32 v37, v26 :: v_dual_mov_b32 v38, v26 +; GFX1250-NEXT: v_dual_mov_b32 v39, v26 :: v_dual_mov_b32 v40, v26 +; GFX1250-NEXT: v_mov_b32_e32 v41, v26 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], s1, s0 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_non_inlineable: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 0x40400000 +; GISEL-NEXT: v_mov_b32_e32 v42, 0x64 +; GISEL-NEXT: s_mov_b32 s14, s0 +; GISEL-NEXT: s_mov_b32 s15, s0 +; GISEL-NEXT: s_mov_b32 s1, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s8, s0 +; GISEL-NEXT: s_mov_b32 s9, s0 +; GISEL-NEXT: s_mov_b32 s10, s0 +; GISEL-NEXT: s_mov_b32 s11, s0 +; GISEL-NEXT: s_mov_b32 s12, s0 +; GISEL-NEXT: s_mov_b32 s13, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9] +; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1] +; GISEL-NEXT: v_mov_b32_e32 v43, 0x65 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], v42, v43 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>, i32 1, i32 0, i32 100, i32 1, i32 0, i32 101, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, i64 inreg %scale_src0, i64 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s[0:1], s[2:3] 
matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i64 %scale_src0, i32 1, i32 0, i64 %scale_src1, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_non_splat(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_splat: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_dual_mov_b32 v26, 1.0 :: v_dual_mov_b32 v27, 2.0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v28, v26 :: v_dual_mov_b32 v29, v26 +; GFX1250-NEXT: v_dual_mov_b32 v30, v26 :: v_dual_mov_b32 v31, v26 +; GFX1250-NEXT: v_dual_mov_b32 v32, v26 :: v_dual_mov_b32 v33, v26 +; GFX1250-NEXT: v_dual_mov_b32 v34, v26 :: v_dual_mov_b32 v35, v26 +; GFX1250-NEXT: v_dual_mov_b32 v36, v26 :: v_dual_mov_b32 v37, v26 +; GFX1250-NEXT: v_dual_mov_b32 v38, v26 :: v_dual_mov_b32 v39, v26 +; GFX1250-NEXT: v_dual_mov_b32 v40, v26 :: v_dual_mov_b32 v41, v26 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_splat: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 1.0 +; GISEL-NEXT: s_mov_b32 s1, 2.0 +; GISEL-NEXT: s_mov_b32 s14, s0 +; GISEL-NEXT: s_mov_b32 s15, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s8, s0 +; GISEL-NEXT: s_mov_b32 s9, s0 +; GISEL-NEXT: s_mov_b32 s10, s0 +; GISEL-NEXT: s_mov_b32 s11, s0 +; GISEL-NEXT: s_mov_b32 s12, s0 +; GISEL-NEXT: s_mov_b32 s13, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11] +; GISEL-NEXT: 
v_mov_b64_e32 v[34:35], s[8:9] +; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1] +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 2.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i64 1, i32 1, i32 0, i64 2, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_non_inlineable(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_inlineable: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_mov_b32_e32 v26, 0x40400000 +; GFX1250-NEXT: s_mov_b64 s[0:1], 0x65 +; GFX1250-NEXT: s_mov_b64 s[2:3], 0x64 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v27, v26 :: v_dual_mov_b32 v28, v26 +; GFX1250-NEXT: v_dual_mov_b32 v29, v26 :: v_dual_mov_b32 v30, v26 +; GFX1250-NEXT: v_dual_mov_b32 v31, v26 :: v_dual_mov_b32 v32, v26 +; GFX1250-NEXT: v_dual_mov_b32 v33, v26 :: v_dual_mov_b32 v34, v26 +; GFX1250-NEXT: v_dual_mov_b32 v35, v26 :: v_dual_mov_b32 v36, v26 +; GFX1250-NEXT: v_dual_mov_b32 v37, v26 :: v_dual_mov_b32 v38, v26 +; GFX1250-NEXT: v_dual_mov_b32 v39, v26 :: v_dual_mov_b32 v40, v26 +; GFX1250-NEXT: v_mov_b32_e32 v41, v26 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], s[2:3], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_inlineable: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 0x40400000 +; GISEL-NEXT: v_mov_b64_e32 v[42:43], 0x64 +; GISEL-NEXT: s_mov_b32 s14, s0 +; GISEL-NEXT: s_mov_b32 s15, s0 +; GISEL-NEXT: s_mov_b32 s1, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s8, s0 +; GISEL-NEXT: s_mov_b32 s9, s0 +; GISEL-NEXT: s_mov_b32 s10, s0 +; GISEL-NEXT: s_mov_b32 s11, s0 +; GISEL-NEXT: s_mov_b32 s12, s0 +; GISEL-NEXT: s_mov_b32 s13, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9] +; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7] +; 
GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1] +; GISEL-NEXT: v_mov_b64_e32 v[44:45], 0x65 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], v[42:43], v[44:45] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>, i32 1, i32 0, i64 100, i32 1, i32 0, i64 101, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x4.f32.v8f32.v2f32(i1, <2 x float>, i1, <2 x float>, i16, <8 x float>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.bf16.v8f32.v16bf16(i1, <16 x bfloat>, i1, <16 x bfloat>, i16, <8 x float>, i1, i1) declare <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x32.bf16.v8bf16.v16bf16(i1, <16 x bfloat>, i1, <16 x bfloat>, i16, <8 x bfloat>, i1, i1) @@ -2332,6 +2852,8 @@ declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x64.iu8.v8i32.v8i32(i1 immarg, <8 x declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1, <16 x half>, i1, <16 x half>, i16, <8 x float>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x32.f16.v8f16.v16f16(i1, <16 x half>, i1, <16 x half>, i16, <8 x half>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>, i32, i32, i64, i32, i32, i64, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.fp8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.bf8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.bf8.fp8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) @@ -2341,3 +2863,5 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>) +declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, 
i32, i64, i1, i1) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll index bf8308b..8f674f8 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll @@ -1192,6 +1192,138 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_negC(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_negC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_negC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 1, <8 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_neg_absC(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_neg_absC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_neg_absC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 3, <8 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_16x16x128_f8f6f4_ignoreC(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_ignoreC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 
v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_16x16x128_f8f6f4_ignoreC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 4, <8 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_negC(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_negC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_negC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 1, <8 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_neg_absC(<16 x i32> %A, <16 x i32> %B, <8 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_neg_absC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_neg_absC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 3, <8 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_16x16x128_f8f6f4_ignoreC(<16 x i32> %A, <16 x i32> %B, 
<8 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_ignoreC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_16x16x128_f8f6f4_ignoreC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_16x16x128_f8f6f4 v[32:39], v[0:15], v[16:31], v[32:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x1 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:16 +; GISEL-NEXT: s_endpgm +bb: + %res = call <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32 0, <16 x i32> %A, i32 0, <16 x i32> %B, i16 4, <8 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <8 x float> %res, ptr addrspace(1) %out + ret void +} + define amdgpu_ps void @test_wmma_f16_16x16x128_fp8_fp8_negC(<16 x i32> %A, <16 x i32> %B, <8 x half> %C, ptr addrspace(1) %out) { ; GFX1250-LABEL: test_wmma_f16_16x16x128_fp8_fp8_negC: ; GFX1250: ; %bb.0: ; %bb @@ -1750,6 +1882,162 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_negC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_negC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_negC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 1, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_neg_absC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_neg_absC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], 
v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_neg_absC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 3, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_ignoreC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_ignoreC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_ignoreC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 4, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_negC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_negC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_negC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GISEL-NEXT: 
s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 1, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_neg_absC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_neg_absC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_neg_absC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 3, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_ignoreC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_ignoreC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_ignoreC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> 
%B, i16 4, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + define amdgpu_ps void @test_swmmac_f32_16x16x64_bf16_negA(<16 x bfloat> %A, <32 x bfloat> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) { ; GFX1250-LABEL: test_swmmac_f32_16x16x64_bf16_negA: ; GFX1250: ; %bb.0: ; %bb @@ -2034,6 +2322,8 @@ declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x64.iu8.v8i32.v8i32(i1 immarg, <8 x declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1, <16 x half>, i1, <16 x half>, i16, <8 x float>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x32.f16.v8f16.v16f16(i1, <16 x half>, i1, <16 x half>, i16, <8 x half>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.scale.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <8 x float> @llvm.amdgcn.wmma.scale16.f32.16x16x128.f8f6f4.v8f32.v16i32.v16i32(i32, <16 x i32>, i32, <16 x i32>, i16, <8 x float>, i32, i32, i64, i32, i32, i64, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.fp8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.fp8.bf8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x128.bf8.fp8.v8f16.v16i32(<16 x i32>, <16 x i32>, i16, <8 x half>, i1, i1) @@ -2043,6 +2333,8 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>) +declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, i32, i64, i1, i1) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x64.bf16.v8f32.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x float>, i16, i1, i1) declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x64.bf16.v8bf16.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x bfloat>, i16, i1, i1) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.ldexp.ll b/llvm/test/CodeGen/AMDGPU/llvm.ldexp.ll index 0e66b0a..22f562a 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.ldexp.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.ldexp.ll @@ -784,13 +784,13 @@ define <3 x half> @test_ldexp_v3f16_v3i32(<3 x half> %a, <3 x i32> %b) { ; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v5, 0x7fff ; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v4, 0xffff8000, v4, v5 ; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v2, 0xffff8000, v2, v5 ; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v3, 0xffff8000, v3, v5 -; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v1.l, v1.l, v4.l -; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; 
GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v4, 0xffff8000, v4, v5 ; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v0.l, v0.l, v2.l +; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v0.h, v0.h, v3.l +; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v1.l, v1.l, v4.l ; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-GISEL-FAKE16-LABEL: test_ldexp_v3f16_v3i32: @@ -910,9 +910,9 @@ define <3 x half> @test_ldexp_v3f16_v3i16(<3 x half> %a, <3 x i16> %b) { ; GFX11-GISEL-TRUE16-LABEL: test_ldexp_v3f16_v3i16: ; GFX11-GISEL-TRUE16: ; %bb.0: ; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v1.l, v1.l, v3.l ; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v0.l, v0.l, v2.l ; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v0.h, v0.h, v2.h +; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v1.l, v1.l, v3.l ; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-GISEL-FAKE16-LABEL: test_ldexp_v3f16_v3i16: diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll index bfc01ef..d59f72a 100644 --- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll +++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll @@ -8343,53 +8343,53 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) ; GFX6-NEXT: s_lshr_b32 s42, s5, 30 -; GFX6-NEXT: s_lshr_b32 s36, s5, 28 -; GFX6-NEXT: s_lshr_b32 s38, s5, 29 -; GFX6-NEXT: s_lshr_b32 s30, s5, 26 -; GFX6-NEXT: s_lshr_b32 s34, s5, 27 -; GFX6-NEXT: s_lshr_b32 s26, s5, 24 -; GFX6-NEXT: s_lshr_b32 s28, s5, 25 -; GFX6-NEXT: s_lshr_b32 s22, s5, 22 -; GFX6-NEXT: s_lshr_b32 s24, s5, 23 -; GFX6-NEXT: s_lshr_b32 s18, s5, 20 -; GFX6-NEXT: s_lshr_b32 s20, s5, 21 -; GFX6-NEXT: s_lshr_b32 s14, s5, 18 -; GFX6-NEXT: s_lshr_b32 s16, s5, 19 -; GFX6-NEXT: s_lshr_b32 s10, s5, 16 -; GFX6-NEXT: s_lshr_b32 s12, s5, 17 -; GFX6-NEXT: s_lshr_b32 s6, s5, 14 -; GFX6-NEXT: s_lshr_b32 s8, s5, 15 -; GFX6-NEXT: s_mov_b32 s40, s5 +; GFX6-NEXT: s_lshr_b32 s36, s4, 30 +; GFX6-NEXT: s_lshr_b32 s38, s4, 31 +; GFX6-NEXT: s_lshr_b32 s30, s4, 28 +; GFX6-NEXT: s_lshr_b32 s34, s4, 29 +; GFX6-NEXT: s_lshr_b32 s26, s4, 26 +; GFX6-NEXT: s_lshr_b32 s28, s4, 27 +; GFX6-NEXT: s_lshr_b32 s22, s4, 24 +; GFX6-NEXT: s_lshr_b32 s24, s4, 25 +; GFX6-NEXT: s_lshr_b32 s18, s4, 22 +; GFX6-NEXT: s_lshr_b32 s20, s4, 23 +; GFX6-NEXT: s_lshr_b32 s14, s4, 20 +; GFX6-NEXT: s_lshr_b32 s16, s4, 21 +; GFX6-NEXT: s_lshr_b32 s10, s4, 18 +; GFX6-NEXT: s_lshr_b32 s12, s4, 19 +; GFX6-NEXT: s_lshr_b32 s6, s4, 16 +; GFX6-NEXT: s_lshr_b32 s8, s4, 17 ; GFX6-NEXT: s_ashr_i32 s7, s5, 31 -; GFX6-NEXT: s_bfe_i64 s[44:45], s[40:41], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[44:45], s[4:5], 0x10000 ; GFX6-NEXT: v_mov_b32_e32 v4, s7 -; GFX6-NEXT: s_lshr_b32 s40, s5, 12 +; GFX6-NEXT: s_lshr_b32 s40, s4, 14 ; GFX6-NEXT: v_mov_b32_e32 v0, s44 ; GFX6-NEXT: v_mov_b32_e32 v1, s45 -; GFX6-NEXT: s_bfe_i64 s[44:45], s[4:5], 0x10000 +; GFX6-NEXT: s_mov_b32 s44, s5 +; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000 ; GFX6-NEXT: v_mov_b32_e32 v6, s44 ; GFX6-NEXT: v_mov_b32_e32 v7, s45 -; GFX6-NEXT: s_lshr_b32 s44, s5, 13 +; GFX6-NEXT: s_lshr_b32 s44, s4, 15 ; GFX6-NEXT: v_mov_b32_e32 v2, s42 ; GFX6-NEXT: v_mov_b32_e32 v3, s43 -; GFX6-NEXT: s_lshr_b32 s42, s5, 10 +; GFX6-NEXT: s_lshr_b32 s42, s4, 12 ; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[38:39], 
s[38:39], 0x10000 ; GFX6-NEXT: v_mov_b32_e32 v8, s36 ; GFX6-NEXT: v_mov_b32_e32 v9, s37 -; GFX6-NEXT: s_lshr_b32 s36, s5, 11 +; GFX6-NEXT: s_lshr_b32 s36, s4, 13 ; GFX6-NEXT: v_mov_b32_e32 v10, s38 ; GFX6-NEXT: v_mov_b32_e32 v11, s39 -; GFX6-NEXT: s_lshr_b32 s38, s5, 8 +; GFX6-NEXT: s_lshr_b32 s38, s4, 10 ; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000 ; GFX6-NEXT: v_mov_b32_e32 v12, s30 ; GFX6-NEXT: v_mov_b32_e32 v13, s31 -; GFX6-NEXT: s_lshr_b32 s30, s5, 9 +; GFX6-NEXT: s_lshr_b32 s30, s4, 11 ; GFX6-NEXT: v_mov_b32_e32 v14, s34 ; GFX6-NEXT: v_mov_b32_e32 v15, s35 -; GFX6-NEXT: s_lshr_b32 s34, s5, 6 +; GFX6-NEXT: s_lshr_b32 s34, s4, 8 ; GFX6-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x10000 ; GFX6-NEXT: v_mov_b32_e32 v5, s7 @@ -8397,190 +8397,191 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v2, s26 ; GFX6-NEXT: v_mov_b32_e32 v3, s27 -; GFX6-NEXT: s_lshr_b32 s26, s5, 7 +; GFX6-NEXT: s_lshr_b32 s26, s4, 9 ; GFX6-NEXT: v_mov_b32_e32 v4, s28 ; GFX6-NEXT: v_mov_b32_e32 v5, s29 -; GFX6-NEXT: s_lshr_b32 s28, s5, 4 +; GFX6-NEXT: s_lshr_b32 s28, s4, 6 ; GFX6-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:480 +; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:240 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v8, s22 ; GFX6-NEXT: v_mov_b32_e32 v9, s23 -; GFX6-NEXT: s_lshr_b32 s22, s5, 5 +; GFX6-NEXT: s_lshr_b32 s22, s4, 7 ; GFX6-NEXT: v_mov_b32_e32 v10, s24 ; GFX6-NEXT: v_mov_b32_e32 v11, s25 -; GFX6-NEXT: s_lshr_b32 s24, s5, 2 +; GFX6-NEXT: s_lshr_b32 s24, s4, 4 ; GFX6-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:464 +; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:224 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v12, s18 ; GFX6-NEXT: v_mov_b32_e32 v13, s19 -; GFX6-NEXT: s_lshr_b32 s18, s5, 3 +; GFX6-NEXT: s_lshr_b32 s18, s4, 5 ; GFX6-NEXT: v_mov_b32_e32 v14, s20 ; GFX6-NEXT: v_mov_b32_e32 v15, s21 -; GFX6-NEXT: s_lshr_b32 s20, s5, 1 +; GFX6-NEXT: s_lshr_b32 s20, s4, 2 ; GFX6-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:448 +; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:208 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v2, s14 ; GFX6-NEXT: v_mov_b32_e32 v3, s15 -; GFX6-NEXT: s_lshr_b32 s14, s4, 30 +; GFX6-NEXT: s_lshr_b32 s14, s4, 3 ; GFX6-NEXT: v_mov_b32_e32 v4, s16 ; GFX6-NEXT: v_mov_b32_e32 v5, s17 -; GFX6-NEXT: s_lshr_b32 s16, s4, 31 +; GFX6-NEXT: s_lshr_b32 s16, s4, 1 ; GFX6-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:432 +; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:192 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v8, s10 ; GFX6-NEXT: v_mov_b32_e32 v9, s11 -; GFX6-NEXT: s_lshr_b32 s10, s4, 28 +; GFX6-NEXT: s_lshr_b32 s10, s5, 29 ; GFX6-NEXT: v_mov_b32_e32 v10, s12 ; GFX6-NEXT: v_mov_b32_e32 v11, s13 -; GFX6-NEXT: s_lshr_b32 s12, s4, 29 +; GFX6-NEXT: s_lshr_b32 s12, s5, 28 ; GFX6-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000 
; GFX6-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:416 +; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:176 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v12, s6 ; GFX6-NEXT: v_mov_b32_e32 v13, s7 -; GFX6-NEXT: s_lshr_b32 s46, s4, 26 +; GFX6-NEXT: s_lshr_b32 s6, s5, 26 ; GFX6-NEXT: v_mov_b32_e32 v14, s8 ; GFX6-NEXT: v_mov_b32_e32 v15, s9 -; GFX6-NEXT: s_lshr_b32 s8, s4, 27 -; GFX6-NEXT: s_bfe_i64 s[6:7], s[44:45], 0x10000 +; GFX6-NEXT: s_lshr_b32 s8, s5, 27 +; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:400 +; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:160 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v2, s40 ; GFX6-NEXT: v_mov_b32_e32 v3, s41 -; GFX6-NEXT: s_lshr_b32 s40, s4, 24 -; GFX6-NEXT: v_mov_b32_e32 v4, s6 -; GFX6-NEXT: v_mov_b32_e32 v5, s7 -; GFX6-NEXT: s_lshr_b32 s44, s4, 25 -; GFX6-NEXT: s_bfe_i64 s[6:7], s[36:37], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[36:37], s[42:43], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:384 +; GFX6-NEXT: s_lshr_b32 s40, s5, 25 +; GFX6-NEXT: v_mov_b32_e32 v4, s44 +; GFX6-NEXT: v_mov_b32_e32 v5, s45 +; GFX6-NEXT: s_lshr_b32 s44, s5, 24 +; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000 +; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:144 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v8, s36 -; GFX6-NEXT: v_mov_b32_e32 v9, s37 -; GFX6-NEXT: s_lshr_b32 s36, s4, 22 -; GFX6-NEXT: v_mov_b32_e32 v10, s6 -; GFX6-NEXT: v_mov_b32_e32 v11, s7 -; GFX6-NEXT: s_lshr_b32 s42, s4, 23 -; GFX6-NEXT: s_bfe_i64 s[6:7], s[30:31], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[30:31], s[38:39], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:368 +; GFX6-NEXT: v_mov_b32_e32 v8, s42 +; GFX6-NEXT: v_mov_b32_e32 v9, s43 +; GFX6-NEXT: s_lshr_b32 s42, s5, 22 +; GFX6-NEXT: v_mov_b32_e32 v10, s36 +; GFX6-NEXT: v_mov_b32_e32 v11, s37 +; GFX6-NEXT: s_lshr_b32 s36, s5, 23 +; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000 +; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v12, s30 -; GFX6-NEXT: v_mov_b32_e32 v13, s31 -; GFX6-NEXT: s_lshr_b32 s30, s4, 20 -; GFX6-NEXT: v_mov_b32_e32 v14, s6 -; GFX6-NEXT: v_mov_b32_e32 v15, s7 -; GFX6-NEXT: s_lshr_b32 s6, s4, 21 +; GFX6-NEXT: v_mov_b32_e32 v12, s38 +; GFX6-NEXT: v_mov_b32_e32 v13, s39 +; GFX6-NEXT: s_lshr_b32 s38, s5, 20 +; GFX6-NEXT: v_mov_b32_e32 v14, s30 +; GFX6-NEXT: v_mov_b32_e32 v15, s31 +; GFX6-NEXT: s_lshr_b32 s4, s5, 21 ; GFX6-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:352 -; GFX6-NEXT: v_mov_b32_e32 v16, s34 -; GFX6-NEXT: v_mov_b32_e32 v17, s35 -; GFX6-NEXT: s_lshr_b32 s34, s4, 18 -; GFX6-NEXT: v_mov_b32_e32 v18, s26 -; GFX6-NEXT: v_mov_b32_e32 v19, s27 -; GFX6-NEXT: s_lshr_b32 s26, s4, 19 +; GFX6-NEXT: s_bfe_i64 s[30:31], s[34:35], 0x10000 +; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:112 +; GFX6-NEXT: s_waitcnt expcnt(0) +; GFX6-NEXT: v_mov_b32_e32 v2, s30 +; GFX6-NEXT: v_mov_b32_e32 v3, s31 +; GFX6-NEXT: s_lshr_b32 s30, s5, 18 +; GFX6-NEXT: v_mov_b32_e32 v4, s26 +; GFX6-NEXT: v_mov_b32_e32 v5, 
s27 +; GFX6-NEXT: s_lshr_b32 s26, s5, 19 ; GFX6-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:336 +; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:96 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v8, s28 ; GFX6-NEXT: v_mov_b32_e32 v9, s29 -; GFX6-NEXT: s_lshr_b32 s28, s4, 16 +; GFX6-NEXT: s_lshr_b32 s28, s5, 17 ; GFX6-NEXT: v_mov_b32_e32 v10, s22 ; GFX6-NEXT: v_mov_b32_e32 v11, s23 -; GFX6-NEXT: s_lshr_b32 s22, s4, 17 +; GFX6-NEXT: s_lshr_b32 s22, s5, 16 +; GFX6-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:320 +; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v12, s24 ; GFX6-NEXT: v_mov_b32_e32 v13, s25 -; GFX6-NEXT: s_lshr_b32 s24, s4, 14 -; GFX6-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000 +; GFX6-NEXT: s_lshr_b32 s24, s5, 14 ; GFX6-NEXT: v_mov_b32_e32 v14, s18 ; GFX6-NEXT: v_mov_b32_e32 v15, s19 -; GFX6-NEXT: s_lshr_b32 s18, s4, 15 -; GFX6-NEXT: v_mov_b32_e32 v2, s20 -; GFX6-NEXT: v_mov_b32_e32 v3, s21 -; GFX6-NEXT: s_lshr_b32 s20, s4, 12 -; GFX6-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000 +; GFX6-NEXT: s_lshr_b32 s18, s5, 15 +; GFX6-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000 +; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:64 +; GFX6-NEXT: v_mov_b32_e32 v16, s20 +; GFX6-NEXT: v_mov_b32_e32 v17, s21 +; GFX6-NEXT: s_lshr_b32 s20, s5, 12 ; GFX6-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:304 +; GFX6-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000 +; GFX6-NEXT: v_mov_b32_e32 v18, s14 +; GFX6-NEXT: v_mov_b32_e32 v19, s15 +; GFX6-NEXT: s_lshr_b32 s14, s5, 13 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v16, s14 -; GFX6-NEXT: v_mov_b32_e32 v17, s15 -; GFX6-NEXT: s_lshr_b32 s14, s4, 13 -; GFX6-NEXT: v_mov_b32_e32 v18, s16 -; GFX6-NEXT: v_mov_b32_e32 v19, s17 -; GFX6-NEXT: s_lshr_b32 s16, s4, 10 +; GFX6-NEXT: v_mov_b32_e32 v2, s16 +; GFX6-NEXT: v_mov_b32_e32 v3, s17 +; GFX6-NEXT: s_lshr_b32 s16, s5, 10 ; GFX6-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:288 +; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v8, s10 -; GFX6-NEXT: v_mov_b32_e32 v9, s11 -; GFX6-NEXT: s_lshr_b32 s10, s4, 11 -; GFX6-NEXT: v_mov_b32_e32 v10, s12 -; GFX6-NEXT: v_mov_b32_e32 v11, s13 -; GFX6-NEXT: s_lshr_b32 s12, s4, 8 +; GFX6-NEXT: v_mov_b32_e32 v8, s12 +; GFX6-NEXT: v_mov_b32_e32 v9, s13 +; GFX6-NEXT: s_lshr_b32 s12, s5, 11 +; GFX6-NEXT: v_mov_b32_e32 v10, s10 +; GFX6-NEXT: v_mov_b32_e32 v11, s11 +; GFX6-NEXT: s_lshr_b32 s10, s5, 8 ; GFX6-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[38:39], s[46:47], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:272 +; GFX6-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000 +; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v12, s38 -; GFX6-NEXT: v_mov_b32_e32 v13, s39 -; GFX6-NEXT: s_lshr_b32 s38, s4, 9 +; GFX6-NEXT: v_mov_b32_e32 v12, s6 +; GFX6-NEXT: v_mov_b32_e32 v13, s7 +; GFX6-NEXT: 
s_lshr_b32 s6, s5, 9 ; GFX6-NEXT: v_mov_b32_e32 v14, s8 ; GFX6-NEXT: v_mov_b32_e32 v15, s9 -; GFX6-NEXT: s_lshr_b32 s8, s4, 6 -; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000 +; GFX6-NEXT: s_lshr_b32 s8, s5, 6 +; GFX6-NEXT: s_bfe_i64 s[34:35], s[44:45], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:256 +; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:16 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, s40 -; GFX6-NEXT: v_mov_b32_e32 v1, s41 -; GFX6-NEXT: s_lshr_b32 s40, s4, 7 -; GFX6-NEXT: v_mov_b32_e32 v2, s44 -; GFX6-NEXT: v_mov_b32_e32 v3, s45 -; GFX6-NEXT: s_lshr_b32 s44, s4, 4 -; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000 +; GFX6-NEXT: v_mov_b32_e32 v16, s34 +; GFX6-NEXT: v_mov_b32_e32 v17, s35 +; GFX6-NEXT: s_lshr_b32 s34, s5, 7 +; GFX6-NEXT: v_mov_b32_e32 v18, s40 +; GFX6-NEXT: v_mov_b32_e32 v19, s41 +; GFX6-NEXT: s_lshr_b32 s40, s5, 4 ; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:240 +; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v16, s36 -; GFX6-NEXT: v_mov_b32_e32 v17, s37 -; GFX6-NEXT: s_lshr_b32 s36, s4, 5 -; GFX6-NEXT: v_mov_b32_e32 v18, s42 -; GFX6-NEXT: v_mov_b32_e32 v19, s43 -; GFX6-NEXT: s_lshr_b32 s42, s4, 2 -; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:224 +; GFX6-NEXT: v_mov_b32_e32 v0, s42 +; GFX6-NEXT: v_mov_b32_e32 v1, s43 +; GFX6-NEXT: s_lshr_b32 s42, s5, 5 +; GFX6-NEXT: v_mov_b32_e32 v2, s36 +; GFX6-NEXT: v_mov_b32_e32 v3, s37 +; GFX6-NEXT: s_lshr_b32 s36, s5, 2 +; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000 +; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:480 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v8, s30 -; GFX6-NEXT: v_mov_b32_e32 v9, s31 -; GFX6-NEXT: s_lshr_b32 s30, s4, 3 -; GFX6-NEXT: s_lshr_b32 s4, s4, 1 -; GFX6-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000 +; GFX6-NEXT: v_mov_b32_e32 v8, s38 +; GFX6-NEXT: v_mov_b32_e32 v9, s39 +; GFX6-NEXT: s_lshr_b32 s38, s5, 3 +; GFX6-NEXT: s_lshr_b32 s44, s5, 1 ; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000 @@ -8589,71 +8590,71 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX6-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x10000 ; GFX6-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000 -; GFX6-NEXT: s_bfe_i64 s[6:7], 
s[6:7], 0x10000 -; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:208 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192 -; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:176 -; GFX6-NEXT: v_mov_b32_e32 v10, s6 -; GFX6-NEXT: v_mov_b32_e32 v11, s7 -; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160 -; GFX6-NEXT: s_waitcnt expcnt(2) -; GFX6-NEXT: v_mov_b32_e32 v0, s34 -; GFX6-NEXT: v_mov_b32_e32 v1, s35 +; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000 +; GFX6-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000 +; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:464 +; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:448 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:432 +; GFX6-NEXT: v_mov_b32_e32 v10, s4 +; GFX6-NEXT: v_mov_b32_e32 v11, s5 +; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:416 +; GFX6-NEXT: s_waitcnt expcnt(1) +; GFX6-NEXT: v_mov_b32_e32 v0, s30 +; GFX6-NEXT: v_mov_b32_e32 v1, s31 ; GFX6-NEXT: v_mov_b32_e32 v2, s26 ; GFX6-NEXT: v_mov_b32_e32 v3, s27 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:400 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, s28 -; GFX6-NEXT: v_mov_b32_e32 v1, s29 -; GFX6-NEXT: v_mov_b32_e32 v2, s22 -; GFX6-NEXT: v_mov_b32_e32 v3, s23 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128 +; GFX6-NEXT: v_mov_b32_e32 v0, s22 +; GFX6-NEXT: v_mov_b32_e32 v1, s23 +; GFX6-NEXT: v_mov_b32_e32 v2, s28 +; GFX6-NEXT: v_mov_b32_e32 v3, s29 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:384 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, s24 ; GFX6-NEXT: v_mov_b32_e32 v1, s25 ; GFX6-NEXT: v_mov_b32_e32 v2, s18 ; GFX6-NEXT: v_mov_b32_e32 v3, s19 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:368 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, s20 ; GFX6-NEXT: v_mov_b32_e32 v1, s21 ; GFX6-NEXT: v_mov_b32_e32 v2, s14 ; GFX6-NEXT: v_mov_b32_e32 v3, s15 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:352 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, s16 ; GFX6-NEXT: v_mov_b32_e32 v1, s17 -; GFX6-NEXT: v_mov_b32_e32 v2, s10 -; GFX6-NEXT: v_mov_b32_e32 v3, s11 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80 +; GFX6-NEXT: v_mov_b32_e32 v2, s12 +; GFX6-NEXT: v_mov_b32_e32 v3, s13 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:336 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, s12 -; GFX6-NEXT: v_mov_b32_e32 v1, s13 -; GFX6-NEXT: v_mov_b32_e32 v2, s38 -; GFX6-NEXT: v_mov_b32_e32 v3, s39 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64 +; GFX6-NEXT: v_mov_b32_e32 v0, s10 +; GFX6-NEXT: v_mov_b32_e32 v1, s11 +; GFX6-NEXT: v_mov_b32_e32 v2, s6 +; GFX6-NEXT: v_mov_b32_e32 v3, s7 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:320 ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, s8 ; GFX6-NEXT: v_mov_b32_e32 v1, s9 -; GFX6-NEXT: v_mov_b32_e32 v2, s40 -; GFX6-NEXT: v_mov_b32_e32 v3, s41 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48 +; GFX6-NEXT: v_mov_b32_e32 v2, s34 +; GFX6-NEXT: v_mov_b32_e32 v3, s35 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:304 ; 
GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, s44 -; GFX6-NEXT: v_mov_b32_e32 v1, s45 -; GFX6-NEXT: v_mov_b32_e32 v2, s36 -; GFX6-NEXT: v_mov_b32_e32 v3, s37 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 +; GFX6-NEXT: v_mov_b32_e32 v0, s40 +; GFX6-NEXT: v_mov_b32_e32 v1, s41 +; GFX6-NEXT: v_mov_b32_e32 v2, s42 +; GFX6-NEXT: v_mov_b32_e32 v3, s43 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:288 ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, s42 -; GFX6-NEXT: v_mov_b32_e32 v1, s43 -; GFX6-NEXT: v_mov_b32_e32 v2, s30 -; GFX6-NEXT: v_mov_b32_e32 v3, s31 -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 -; GFX6-NEXT: v_mov_b32_e32 v8, s4 -; GFX6-NEXT: v_mov_b32_e32 v9, s5 -; GFX6-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 +; GFX6-NEXT: v_mov_b32_e32 v0, s36 +; GFX6-NEXT: v_mov_b32_e32 v1, s37 +; GFX6-NEXT: v_mov_b32_e32 v2, s38 +; GFX6-NEXT: v_mov_b32_e32 v3, s39 +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:272 +; GFX6-NEXT: v_mov_b32_e32 v8, s44 +; GFX6-NEXT: v_mov_b32_e32 v9, s45 +; GFX6-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:256 ; GFX6-NEXT: s_endpgm ; ; GFX8-LABEL: constant_sextload_v64i1_to_v64i64: diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll index 4491c4b..a135b43 100644 --- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll +++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll @@ -232,32 +232,38 @@ define amdgpu_kernel void @constant_load_v3i16(ptr addrspace(1) %out, ptr addrsp ; ; EG-LABEL: constant_load_v3i16: ; EG: ; %bb.0: ; %entry -; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 1 @6 -; EG-NEXT: ALU 14, @11, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T3.X, 0 -; EG-NEXT: MEM_RAT MSKOR T2.XW, T0.X +; EG-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 2 @6 +; EG-NEXT: ALU 19, @13, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.X, T7.X, 0 +; EG-NEXT: MEM_RAT MSKOR T5.XW, T8.X ; EG-NEXT: CF_END ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_16 T1.X, T0.X, 0, #1 -; EG-NEXT: VTX_READ_16 T0.X, T0.X, 4, #1 -; EG-NEXT: ALU clause starting at 10: -; EG-NEXT: MOV * T0.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 11: +; EG-NEXT: VTX_READ_16 T6.X, T5.X, 0, #1 +; EG-NEXT: VTX_READ_16 T7.X, T5.X, 2, #1 +; EG-NEXT: VTX_READ_16 T5.X, T5.X, 4, #1 +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: MOV * T5.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 13: ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, ; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00) ; EG-NEXT: AND_INT T1.W, PV.W, literal.x, -; EG-NEXT: AND_INT * T2.W, T0.X, literal.y, +; EG-NEXT: AND_INT * T2.W, T5.X, literal.y, ; EG-NEXT: 3(4.203895e-45), 65535(9.183409e-41) ; EG-NEXT: LSHL * T1.W, PV.W, literal.x, ; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00) -; EG-NEXT: LSHL T2.X, T2.W, PV.W, -; EG-NEXT: LSHL * T2.W, literal.x, PV.W, +; EG-NEXT: LSHL T5.X, T2.W, PV.W, +; EG-NEXT: LSHL * T5.W, literal.x, PV.W, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: MOV T5.Y, 0.0, +; EG-NEXT: MOV * T5.Z, 0.0, +; EG-NEXT: LSHR T8.X, T0.W, literal.x, +; EG-NEXT: LSHL T0.W, T7.X, literal.y, +; EG-NEXT: AND_INT * T1.W, T6.X, literal.z, +; EG-NEXT: 2(2.802597e-45), 16(2.242078e-44) ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: MOV T2.Y, 0.0, -; EG-NEXT: MOV * T2.Z, 0.0, -; EG-NEXT: LSHR T0.X, T0.W, literal.x, -; EG-NEXT: LSHR * T3.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT 
T6.X, PV.W, PS, +; EG-NEXT: LSHR * T7.X, KC0[2].Y, literal.x, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) ; ; GFX12-LABEL: constant_load_v3i16: @@ -1643,15 +1649,15 @@ define amdgpu_kernel void @constant_sextload_v4i16_to_v4i32(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s3, 0xf000 ; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s6, s4, 16 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s7, s5, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s6, s5, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s7, s4, 16 ; GCN-NOHSA-SI-NEXT: s_sext_i32_i16 s5, s5 ; GCN-NOHSA-SI-NEXT: s_sext_i32_i16 s4, s4 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s2, -1 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s6 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s5 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s7 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s6 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; GCN-NOHSA-SI-NEXT: s_endpgm ; @@ -1666,14 +1672,14 @@ define amdgpu_kernel void @constant_sextload_v4i16_to_v4i32(ptr addrspace(1) %ou ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1 ; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0) -; GCN-HSA-NEXT: s_ashr_i32 s0, s2, 16 -; GCN-HSA-NEXT: s_ashr_i32 s1, s3, 16 +; GCN-HSA-NEXT: s_ashr_i32 s0, s3, 16 +; GCN-HSA-NEXT: s_ashr_i32 s1, s2, 16 ; GCN-HSA-NEXT: s_sext_i32_i16 s3, s3 ; GCN-HSA-NEXT: s_sext_i32_i16 s2, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s0 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s1 ; GCN-HSA-NEXT: v_mov_b32_e32 v2, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s1 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_endpgm ; @@ -6539,33 +6545,33 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: s_mov_b32 s2, -1 ; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NOHSA-SI-NEXT: s_mov_b32 s8, s7 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s10, s5 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s6, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s6, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s5 ; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s4, 16 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[4:5], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[6:7], 0x100000 ; GCN-NOHSA-SI-NEXT: s_ashr_i32 s13, s5, 31 ; GCN-NOHSA-SI-NEXT: s_ashr_i32 s15, s5, 16 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[10:11], 0x100000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s20, s7, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s21, s7, 16 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[12:13], 0x100000 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s12, s7, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s20, s7, 16 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[8:9], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[14:15], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[12:13], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x100000 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s21 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s20 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s20 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s12 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s15 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s13 -; GCN-NOHSA-SI-NEXT: 
buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s18 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s19 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s4 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s15 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s13 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s16 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s17 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s10 @@ -6586,8 +6592,8 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou ; GCN-HSA-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0 ; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0) ; GCN-HSA-NEXT: s_mov_b32 s2, s7 -; GCN-HSA-NEXT: s_mov_b32 s8, s5 -; GCN-HSA-NEXT: s_lshr_b32 s10, s6, 16 +; GCN-HSA-NEXT: s_lshr_b32 s8, s6, 16 +; GCN-HSA-NEXT: s_mov_b32 s10, s5 ; GCN-HSA-NEXT: s_lshr_b32 s12, s4, 16 ; GCN-HSA-NEXT: s_ashr_i32 s13, s5, 16 ; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[4:5], 0x100000 @@ -6605,25 +6611,25 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 ; GCN-HSA-NEXT: v_mov_b32_e32 v2, s7 ; GCN-HSA-NEXT: v_mov_b32_e32 v3, s12 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s8 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s9 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s13 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s6 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s16 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s17 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s8 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s9 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s16 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s17 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s10 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s11 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s13 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s6 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1 @@ -7161,12 +7167,12 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) % ; GCN-NOHSA-SI-NEXT: s_mov_b32 s10, -1 ; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s7 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s14, s5 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s16, s3 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s18, s1 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s6, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s4, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s2, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s6, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s16, s5 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s4, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s20, s3 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s2, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s24, s1 ; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s0, 16 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[0:1], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[2:3], 0x100000 @@ -7174,60 +7180,60 @@ define amdgpu_kernel void 
@constant_sextload_v16i16_to_v16i64(ptr addrspace(1) % ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[6:7], 0x100000 ; GCN-NOHSA-SI-NEXT: s_ashr_i32 s21, s1, 31 ; GCN-NOHSA-SI-NEXT: s_ashr_i32 s23, s1, 16 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[0:1], s[18:19], 0x100000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s25, s3, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s3, 16 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[2:3], s[16:17], 0x100000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s5, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s38, s5, 16 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x100000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s7, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s40, s7, 16 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[0:1], s[24:25], 0x100000 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s24, s3, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s25, s3, 16 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[2:3], s[20:21], 0x100000 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s20, s5, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s5, 16 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[16:17], 0x100000 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s7, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s38, s7, 16 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[12:13], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[26:27], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[24:25], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[22:23], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[20:21], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x100000 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s40 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s39 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:112 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s38 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s33 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:80 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s2 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s3 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s27 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s25 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:48 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s1 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s23 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s21 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:16 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:112 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s36 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s37 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s4 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s27 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s20 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[8:11], 0 offset:80 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s34 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s35 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s2 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s3 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s25 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s24 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[8:11], 0 offset:48 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: 
v_mov_b32_e32 v8, s30 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s31 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s0 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s1 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s23 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s21 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[8:11], 0 offset:16 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s28 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s29 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s18 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s19 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s14 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s15 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:96 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s16 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s17 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s18 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s19 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:64 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s14 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s15 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s16 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s17 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[8:11], 0 offset:32 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s12 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s13 @@ -7243,19 +7249,19 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) % ; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0) ; GCN-HSA-NEXT: s_load_dwordx8 s[0:7], s[10:11], 0x0 ; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0) -; GCN-HSA-NEXT: s_mov_b32 s12, s7 +; GCN-HSA-NEXT: s_mov_b32 s10, s7 +; GCN-HSA-NEXT: s_lshr_b32 s12, s6, 16 ; GCN-HSA-NEXT: s_mov_b32 s14, s5 -; GCN-HSA-NEXT: s_mov_b32 s16, s3 -; GCN-HSA-NEXT: s_mov_b32 s18, s1 -; GCN-HSA-NEXT: s_ashr_i32 s27, s1, 31 +; GCN-HSA-NEXT: s_lshr_b32 s16, s4, 16 +; GCN-HSA-NEXT: s_ashr_i32 s25, s1, 31 ; GCN-HSA-NEXT: s_ashr_i32 s29, s3, 31 ; GCN-HSA-NEXT: s_ashr_i32 s30, s3, 16 -; GCN-HSA-NEXT: s_lshr_b32 s20, s6, 16 -; GCN-HSA-NEXT: s_lshr_b32 s22, s4, 16 -; GCN-HSA-NEXT: s_lshr_b32 s24, s2, 16 -; GCN-HSA-NEXT: s_lshr_b32 s26, s0, 16 -; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[2:3], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[12:13], 0x100000 +; GCN-HSA-NEXT: s_mov_b32 s18, s3 +; GCN-HSA-NEXT: s_lshr_b32 s20, s2, 16 +; GCN-HSA-NEXT: s_mov_b32 s22, s1 +; GCN-HSA-NEXT: s_lshr_b32 s24, s0, 16 +; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[2:3], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[10:11], 0x100000 ; GCN-HSA-NEXT: s_ashr_i32 s28, s1, 16 ; GCN-HSA-NEXT: s_ashr_i32 s31, s5, 31 ; GCN-HSA-NEXT: s_ashr_i32 s33, s5, 16 @@ -7266,55 +7272,36 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) % ; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x100000 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[26:27], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[24:25], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[24:25], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[22:23], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x100000 -; GCN-HSA-NEXT: s_add_u32 s24, s8, 0x70 -; GCN-HSA-NEXT: s_addc_u32 s25, s9, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14 -; GCN-HSA-NEXT: s_add_u32 s14, s8, 0x50 -; GCN-HSA-NEXT: v_mov_b32_e32 v8, s24 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15 -; 
GCN-HSA-NEXT: s_addc_u32 s15, s9, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s14 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s25 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s15 +; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x100000 +; GCN-HSA-NEXT: s_add_u32 s22, s8, 0x70 +; GCN-HSA-NEXT: s_addc_u32 s23, s9, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6 +; GCN-HSA-NEXT: s_add_u32 s6, s8, 0x60 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7 +; GCN-HSA-NEXT: s_addc_u32 s7, s9, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s22 +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7 +; GCN-HSA-NEXT: v_mov_b32_e32 v9, s23 +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6 ; GCN-HSA-NEXT: v_mov_b32_e32 v2, s35 ; GCN-HSA-NEXT: v_mov_b32_e32 v3, s34 -; GCN-HSA-NEXT: s_add_u32 s14, s8, 48 -; GCN-HSA-NEXT: v_mov_b32_e32 v6, s33 -; GCN-HSA-NEXT: v_mov_b32_e32 v7, s31 +; GCN-HSA-NEXT: s_add_u32 s6, s8, 0x50 +; GCN-HSA-NEXT: v_mov_b32_e32 v6, s12 +; GCN-HSA-NEXT: v_mov_b32_e32 v7, s13 ; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[0:3] ; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[4:7] -; GCN-HSA-NEXT: s_addc_u32 s15, s9, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s16 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s17 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s30 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s29 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15 -; GCN-HSA-NEXT: s_add_u32 s14, s8, 16 -; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GCN-HSA-NEXT: s_addc_u32 s15, s9, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s28 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s27 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15 -; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GCN-HSA-NEXT: s_nop 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s6 -; GCN-HSA-NEXT: s_add_u32 s6, s8, 0x60 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s7 ; GCN-HSA-NEXT: s_addc_u32 s7, s9, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s20 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s21 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s14 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s15 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s33 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s31 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_nop 0 @@ -7323,17 +7310,35 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) % ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5 ; GCN-HSA-NEXT: s_addc_u32 s5, s9, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s22 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s23 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s16 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s17 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5 +; GCN-HSA-NEXT: s_add_u32 s4, s8, 48 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GCN-HSA-NEXT: s_addc_u32 s5, s9, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s30 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s29 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5 ; GCN-HSA-NEXT: s_add_u32 s4, s8, 32 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_addc_u32 s5, s9, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s26 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s27 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s20 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s21 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5 +; GCN-HSA-NEXT: s_add_u32 s4, s8, 16 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GCN-HSA-NEXT: s_addc_u32 s5, s9, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4 ; 
GCN-HSA-NEXT: v_mov_b32_e32 v0, s10 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s11 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s12 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s13 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s28 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s25 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8 @@ -8307,148 +8312,151 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) % ; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NOHSA-SI-NEXT: s_mov_b32 s18, s15 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s20, s13 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s24, s11 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s26, s9 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s22, s7 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s1, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s1, 16 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s41, s3, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s43, s3, 16 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s23, s5, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s5, 16 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s29, s7, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s31, s7, 16 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s35, s9, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s37, s9, 16 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[46:47], s[26:27], 0x100000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s55, s11, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s57, s11, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s22, s11 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s24, s9 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s23, s1, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s25, s1, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s3, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s29, s3, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s31, s5, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s5, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s35, s7, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s37, s7, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s9, 31 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[48:49], s[24:25], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[50:51], s[20:21], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[52:53], s[18:19], 0x100000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s58, s13, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s59, s13, 16 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s60, s15, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s61, s15, 16 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s54, s5 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s44, s3 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s56, s1 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s14, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s12, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s10, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s8, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s6, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s4, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s40, s2, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s42, s0, 16 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[50:51], s[22:23], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[52:53], s[20:21], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[54:55], s[18:19], 0x100000 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s43, s9, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s45, s11, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s47, s11, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s41, s13, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s56, s13, 16 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s57, s15, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s58, s15, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s14, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s12, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s10, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s8, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s46, s7 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s6, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s44, s5 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s4, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s42, s3 +; 
GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s2, 16 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s40, s1 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s0, 16 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[0:1], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[2:3], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x100000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[14:15], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[2:3], s[12:13], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[14:15], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x100000 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s16 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s17 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s52 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s53 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s50 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s51 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s48 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s49 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s46 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s47 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s54 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s55 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s12 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s13 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s52 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s53 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s2 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s3 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s50 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s51 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s48 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s49 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s58 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s57 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s56 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s41 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s3, 0xf000 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s2, -1 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[22:23], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[54:55], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[46:47], s[56:57], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x100000 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s14 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s15 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s61 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s60 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s59 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s58 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s57 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s55 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s37 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s35 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s31 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s29 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v20, s16 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v21, s17 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, s27 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:208 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, s23 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[42:43], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[40:41], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[38:39], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x100000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x100000 -; 
GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x100000 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:176 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:144 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:112 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:80 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(5) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s44 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s45 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s43 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s41 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[46:47], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[44:45], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[42:43], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x100000 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s47 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s45 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v20, s43 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v21, s39 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:240 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s46 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s47 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s39 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s33 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s12 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s13 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s37 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s35 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:208 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s14 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:176 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s15 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s33 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s31 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:144 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s16 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s17 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s29 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:80 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[38:39], 0x100000 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s27 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:48 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(2) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s40 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s41 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s25 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s23 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:16 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s24 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s25 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s12 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s13 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s10 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s11 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s8 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s9 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[36:37], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[34:35], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[30:31], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[28:29], 0x100000 +; 
GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x100000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x100000 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s6 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s7 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v20, s4 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v21, s5 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v24, s20 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v25, s21 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s30 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s31 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s12 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s13 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s18 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s19 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s36 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s37 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s20 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s21 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s24 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s25 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s34 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s35 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s18 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s19 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s22 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s23 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s28 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s29 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s26 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s27 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s26 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s27 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s16 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s17 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:96 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, s22 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, s23 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, s14 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, s15 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v26, s16 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v27, s17 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:32 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s14 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s15 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s10 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s11 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s8 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s9 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 ; GCN-NOHSA-SI-NEXT: s_endpgm ; ; GCN-HSA-LABEL: constant_sextload_v32i16_to_v32i64: @@ -8460,47 +8468,47 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) % ; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0) ; GCN-HSA-NEXT: s_load_dwordx16 s[0:15], s[18:19], 0x0 ; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0) -; GCN-HSA-NEXT: s_mov_b32 s34, s15 -; GCN-HSA-NEXT: s_ashr_i32 s41, s3, 31 -; GCN-HSA-NEXT: s_ashr_i32 s42, s3, 16 -; GCN-HSA-NEXT: s_ashr_i32 s57, s5, 16 -; GCN-HSA-NEXT: s_ashr_i32 s59, s7, 31 -; GCN-HSA-NEXT: s_ashr_i32 s61, s7, 16 -; GCN-HSA-NEXT: s_ashr_i32 s63, s9, 31 
-; GCN-HSA-NEXT: s_ashr_i32 s65, s9, 16 -; GCN-HSA-NEXT: s_ashr_i32 s67, s11, 31 -; GCN-HSA-NEXT: s_ashr_i32 s69, s11, 16 -; GCN-HSA-NEXT: s_mov_b32 s44, s13 -; GCN-HSA-NEXT: s_mov_b32 s46, s11 -; GCN-HSA-NEXT: s_mov_b32 s48, s9 -; GCN-HSA-NEXT: s_mov_b32 s50, s7 -; GCN-HSA-NEXT: s_mov_b32 s52, s5 -; GCN-HSA-NEXT: s_mov_b32 s38, s3 -; GCN-HSA-NEXT: s_mov_b32 s36, s1 -; GCN-HSA-NEXT: s_lshr_b32 s54, s14, 16 -; GCN-HSA-NEXT: s_lshr_b32 s56, s12, 16 -; GCN-HSA-NEXT: s_lshr_b32 s58, s10, 16 -; GCN-HSA-NEXT: s_lshr_b32 s60, s8, 16 -; GCN-HSA-NEXT: s_lshr_b32 s62, s6, 16 -; GCN-HSA-NEXT: s_lshr_b32 s64, s4, 16 -; GCN-HSA-NEXT: s_lshr_b32 s66, s2, 16 +; GCN-HSA-NEXT: s_mov_b32 s24, s15 +; GCN-HSA-NEXT: s_ashr_i32 s37, s3, 31 +; GCN-HSA-NEXT: s_ashr_i32 s38, s3, 16 +; GCN-HSA-NEXT: s_ashr_i32 s57, s11, 16 +; GCN-HSA-NEXT: s_ashr_i32 s59, s13, 31 +; GCN-HSA-NEXT: s_ashr_i32 s61, s13, 16 +; GCN-HSA-NEXT: s_ashr_i32 s63, s15, 31 +; GCN-HSA-NEXT: s_ashr_i32 s65, s15, 16 +; GCN-HSA-NEXT: s_lshr_b32 s46, s14, 16 +; GCN-HSA-NEXT: s_mov_b32 s48, s13 +; GCN-HSA-NEXT: s_lshr_b32 s50, s12, 16 +; GCN-HSA-NEXT: s_mov_b32 s52, s11 +; GCN-HSA-NEXT: s_lshr_b32 s34, s10, 16 +; GCN-HSA-NEXT: s_mov_b32 s30, s9 +; GCN-HSA-NEXT: s_lshr_b32 s28, s8, 16 +; GCN-HSA-NEXT: s_mov_b32 s54, s7 +; GCN-HSA-NEXT: s_lshr_b32 s56, s6, 16 +; GCN-HSA-NEXT: s_mov_b32 s58, s5 +; GCN-HSA-NEXT: s_lshr_b32 s60, s4, 16 +; GCN-HSA-NEXT: s_mov_b32 s62, s3 +; GCN-HSA-NEXT: s_lshr_b32 s64, s2, 16 +; GCN-HSA-NEXT: s_mov_b32 s66, s1 ; GCN-HSA-NEXT: s_lshr_b32 s68, s0, 16 ; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[2:3], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[34:35], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[24:25], 0x100000 ; GCN-HSA-NEXT: s_ashr_i32 s33, s1, 31 -; GCN-HSA-NEXT: s_ashr_i32 s40, s1, 16 -; GCN-HSA-NEXT: s_ashr_i32 s43, s5, 31 -; GCN-HSA-NEXT: s_ashr_i32 s70, s13, 31 -; GCN-HSA-NEXT: s_ashr_i32 s71, s13, 16 -; GCN-HSA-NEXT: s_ashr_i32 s72, s15, 31 -; GCN-HSA-NEXT: s_ashr_i32 s73, s15, 16 +; GCN-HSA-NEXT: s_ashr_i32 s36, s1, 16 +; GCN-HSA-NEXT: s_ashr_i32 s39, s5, 31 +; GCN-HSA-NEXT: s_ashr_i32 s40, s5, 16 +; GCN-HSA-NEXT: s_ashr_i32 s41, s7, 31 +; GCN-HSA-NEXT: s_ashr_i32 s42, s7, 16 +; GCN-HSA-NEXT: s_ashr_i32 s43, s9, 31 +; GCN-HSA-NEXT: s_ashr_i32 s44, s9, 16 +; GCN-HSA-NEXT: s_ashr_i32 s45, s11, 31 ; GCN-HSA-NEXT: s_bfe_i64 s[0:1], s[0:1], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[4:5], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[6:7], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[8:9], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[10:11], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[28:29], s[12:13], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[14:15], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[8:9], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[70:71], s[10:11], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[72:73], s[12:13], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[74:75], s[14:15], 0x100000 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 ; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[68:69], 0x100000 @@ -8510,149 +8518,149 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) % ; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[60:61], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[58:59], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[56:57], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[34:35], s[54:55], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[54:55], 0x100000 +; GCN-HSA-NEXT: 
s_bfe_i64 s[28:29], s[28:29], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x100000 +; GCN-HSA-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x100000 ; GCN-HSA-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x100000 -; GCN-HSA-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x100000 ; GCN-HSA-NEXT: s_add_u32 s54, s16, 0xf0 ; GCN-HSA-NEXT: s_addc_u32 s55, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s44 -; GCN-HSA-NEXT: s_add_u32 s44, s16, 0xd0 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s45 -; GCN-HSA-NEXT: s_addc_u32 s45, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v23, s44 -; GCN-HSA-NEXT: v_mov_b32_e32 v24, s45 -; GCN-HSA-NEXT: s_add_u32 s44, s16, 0xb0 -; GCN-HSA-NEXT: s_addc_u32 s45, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v25, s44 -; GCN-HSA-NEXT: v_mov_b32_e32 v26, s45 -; GCN-HSA-NEXT: s_add_u32 s44, s16, 0x90 -; GCN-HSA-NEXT: s_addc_u32 s45, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v27, s44 +; GCN-HSA-NEXT: v_mov_b32_e32 v6, s46 +; GCN-HSA-NEXT: s_add_u32 s46, s16, 0xe0 +; GCN-HSA-NEXT: v_mov_b32_e32 v7, s47 +; GCN-HSA-NEXT: s_addc_u32 s47, s17, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v24, s46 +; GCN-HSA-NEXT: v_mov_b32_e32 v25, s47 +; GCN-HSA-NEXT: s_add_u32 s46, s16, 0xd0 +; GCN-HSA-NEXT: s_addc_u32 s47, s17, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v26, s46 +; GCN-HSA-NEXT: v_mov_b32_e32 v27, s47 +; GCN-HSA-NEXT: s_add_u32 s46, s16, 0xc0 +; GCN-HSA-NEXT: s_addc_u32 s47, s17, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v28, s46 ; GCN-HSA-NEXT: v_mov_b32_e32 v18, s54 -; GCN-HSA-NEXT: v_mov_b32_e32 v28, s45 -; GCN-HSA-NEXT: s_add_u32 s44, s16, 0x70 +; GCN-HSA-NEXT: v_mov_b32_e32 v29, s47 +; GCN-HSA-NEXT: s_add_u32 s46, s16, 0xb0 ; GCN-HSA-NEXT: v_mov_b32_e32 v19, s55 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s73 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s72 -; GCN-HSA-NEXT: s_addc_u32 s45, s17, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s65 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s63 +; GCN-HSA-NEXT: s_addc_u32 s47, s17, 0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[0:3] -; GCN-HSA-NEXT: v_mov_b32_e32 v6, s71 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s38 -; GCN-HSA-NEXT: s_add_u32 s38, s16, 0x50 -; GCN-HSA-NEXT: v_mov_b32_e32 v7, s70 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s39 -; GCN-HSA-NEXT: s_addc_u32 s39, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v8, s46 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s47 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s69 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s67 -; GCN-HSA-NEXT: flat_store_dwordx4 v[23:24], v[4:7] -; GCN-HSA-NEXT: flat_store_dwordx4 v[25:26], v[8:11] -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s36 -; GCN-HSA-NEXT: s_add_u32 s36, s16, 48 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s37 -; GCN-HSA-NEXT: s_addc_u32 s37, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v24, s36 -; GCN-HSA-NEXT: v_mov_b32_e32 v25, s37 -; GCN-HSA-NEXT: s_add_u32 s36, s16, 16 -; GCN-HSA-NEXT: s_addc_u32 s37, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v8, s30 -; GCN-HSA-NEXT: s_add_u32 s30, s16, 0xe0 -; GCN-HSA-NEXT: v_mov_b32_e32 v12, s48 -; GCN-HSA-NEXT: v_mov_b32_e32 v13, s49 -; GCN-HSA-NEXT: v_mov_b32_e32 v14, s65 -; GCN-HSA-NEXT: v_mov_b32_e32 v15, s63 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s31 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s74 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s30 +; GCN-HSA-NEXT: s_add_u32 s30, s16, 0xa0 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s75 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s31 ; GCN-HSA-NEXT: s_addc_u32 s31, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v29, s44 -; GCN-HSA-NEXT: flat_store_dwordx4 v[27:28], v[12:15] -; GCN-HSA-NEXT: v_mov_b32_e32 v16, 
s50 +; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[4:7] +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s48 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s26 +; GCN-HSA-NEXT: s_add_u32 s26, s16, 0x90 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s27 +; GCN-HSA-NEXT: s_addc_u32 s27, s17, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v24, s26 +; GCN-HSA-NEXT: v_mov_b32_e32 v25, s27 +; GCN-HSA-NEXT: s_add_u32 s26, s16, 0x80 +; GCN-HSA-NEXT: v_mov_b32_e32 v9, s49 +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s61 +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s59 +; GCN-HSA-NEXT: s_addc_u32 s27, s17, 0 +; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11] +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s72 +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s24 +; GCN-HSA-NEXT: s_add_u32 s24, s16, 0x70 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s73 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s50 +; GCN-HSA-NEXT: v_mov_b32_e32 v15, s51 +; GCN-HSA-NEXT: v_mov_b32_e32 v9, s25 +; GCN-HSA-NEXT: s_addc_u32 s25, s17, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v30, s46 +; GCN-HSA-NEXT: flat_store_dwordx4 v[28:29], v[12:15] +; GCN-HSA-NEXT: v_mov_b32_e32 v16, s52 ; GCN-HSA-NEXT: v_mov_b32_e32 v14, s14 -; GCN-HSA-NEXT: s_add_u32 s14, s16, 0xc0 -; GCN-HSA-NEXT: v_mov_b32_e32 v17, s51 -; GCN-HSA-NEXT: v_mov_b32_e32 v30, s45 -; GCN-HSA-NEXT: v_mov_b32_e32 v18, s61 -; GCN-HSA-NEXT: v_mov_b32_e32 v19, s59 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s38 +; GCN-HSA-NEXT: s_add_u32 s14, s16, 0x60 +; GCN-HSA-NEXT: v_mov_b32_e32 v17, s53 +; GCN-HSA-NEXT: v_mov_b32_e32 v31, s47 +; GCN-HSA-NEXT: v_mov_b32_e32 v18, s57 +; GCN-HSA-NEXT: v_mov_b32_e32 v19, s45 +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s30 ; GCN-HSA-NEXT: v_mov_b32_e32 v15, s15 ; GCN-HSA-NEXT: s_addc_u32 s15, s17, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v20, s52 -; GCN-HSA-NEXT: v_mov_b32_e32 v21, s53 -; GCN-HSA-NEXT: v_mov_b32_e32 v22, s57 -; GCN-HSA-NEXT: v_mov_b32_e32 v23, s43 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s39 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s42 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s41 -; GCN-HSA-NEXT: v_mov_b32_e32 v26, s36 -; GCN-HSA-NEXT: flat_store_dwordx4 v[29:30], v[16:19] -; GCN-HSA-NEXT: v_mov_b32_e32 v27, s37 -; GCN-HSA-NEXT: v_mov_b32_e32 v16, s30 +; GCN-HSA-NEXT: v_mov_b32_e32 v20, s70 +; GCN-HSA-NEXT: v_mov_b32_e32 v21, s71 +; GCN-HSA-NEXT: v_mov_b32_e32 v22, s34 +; GCN-HSA-NEXT: v_mov_b32_e32 v23, s35 +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s31 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s44 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s43 +; GCN-HSA-NEXT: v_mov_b32_e32 v26, s26 +; GCN-HSA-NEXT: flat_store_dwordx4 v[30:31], v[16:19] +; GCN-HSA-NEXT: v_mov_b32_e32 v27, s27 +; GCN-HSA-NEXT: v_mov_b32_e32 v16, s24 ; GCN-HSA-NEXT: v_mov_b32_e32 v19, s15 -; GCN-HSA-NEXT: v_mov_b32_e32 v6, s40 -; GCN-HSA-NEXT: v_mov_b32_e32 v7, s33 +; GCN-HSA-NEXT: v_mov_b32_e32 v6, s28 +; GCN-HSA-NEXT: v_mov_b32_e32 v7, s29 ; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[20:23] -; GCN-HSA-NEXT: v_mov_b32_e32 v12, s28 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s34 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s35 -; GCN-HSA-NEXT: v_mov_b32_e32 v13, s29 -; GCN-HSA-NEXT: v_mov_b32_e32 v17, s31 +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s22 +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s42 +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s41 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s23 +; GCN-HSA-NEXT: v_mov_b32_e32 v17, s25 ; GCN-HSA-NEXT: v_mov_b32_e32 v18, s14 ; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[0:3] ; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[4:7] ; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[8:11] ; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[12:15] -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s12 -; GCN-HSA-NEXT: s_add_u32 s12, s16, 0xa0 -; GCN-HSA-NEXT: 
v_mov_b32_e32 v3, s13 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s12 +; GCN-HSA-NEXT: s_add_u32 s12, s16, 0x50 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s13 ; GCN-HSA-NEXT: s_addc_u32 s13, s17, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s12 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s26 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s27 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s40 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s39 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s13 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_nop 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10 -; GCN-HSA-NEXT: s_add_u32 s10, s16, 0x80 +; GCN-HSA-NEXT: s_add_u32 s10, s16, 64 ; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11 ; GCN-HSA-NEXT: s_addc_u32 s11, s17, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s10 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s24 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s25 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s20 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s21 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s11 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_nop 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s8 -; GCN-HSA-NEXT: s_add_u32 s8, s16, 0x60 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s9 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s8 +; GCN-HSA-NEXT: s_add_u32 s8, s16, 48 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s9 ; GCN-HSA-NEXT: s_addc_u32 s9, s17, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s22 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s23 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s38 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s37 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_nop 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v2, s6 -; GCN-HSA-NEXT: s_add_u32 s6, s16, 64 +; GCN-HSA-NEXT: s_add_u32 s6, s16, 32 ; GCN-HSA-NEXT: v_mov_b32_e32 v3, s7 ; GCN-HSA-NEXT: s_addc_u32 s7, s17, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s20 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s21 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_nop 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s4 -; GCN-HSA-NEXT: s_add_u32 s4, s16, 32 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s5 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4 +; GCN-HSA-NEXT: s_add_u32 s4, s16, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5 ; GCN-HSA-NEXT: s_addc_u32 s5, s17, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s36 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s33 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s16 diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll index b39b38a..b534c2c 100644 --- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll +++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll @@ -6398,41 +6398,41 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out ; GFX6-NOHSA-NEXT: s_mov_b32 s2, -1 ; GFX6-NOHSA-NEXT: s_waitcnt lgkmcnt(0) ; GFX6-NOHSA-NEXT: s_lshr_b32 s6, s5, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s8, s5, 8 -; GFX6-NOHSA-NEXT: s_mov_b32 s10, s5 -; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s4, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s4, 24 -; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s4, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s8, s4, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s4, 24 +; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s4, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s5, 8 +; GFX6-NOHSA-NEXT: s_mov_b32 s16, s5 +; GFX6-NOHSA-NEXT: s_bfe_i64 
s[16:17], s[16:17], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[18:19], s[4:5], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 -; GFX6-NOHSA-NEXT: s_ashr_i32 s17, s5, 31 +; GFX6-NOHSA-NEXT: s_ashr_i32 s15, s5, 31 ; GFX6-NOHSA-NEXT: s_ashr_i32 s20, s5, 24 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[16:17], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s20 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s17 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s10 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s11 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s18 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s19 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s15 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s18 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s19 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s16 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s17 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s6 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s7 ; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s8 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s9 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32 -; GFX6-NOHSA-NEXT: s_waitcnt expcnt(1) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s12 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s13 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s14 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s15 +; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s8 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s9 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s10 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s11 ; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s12 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s13 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s4 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s5 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32 ; GFX6-NOHSA-NEXT: s_endpgm ; ; GFX7-HSA-LABEL: constant_sextload_v8i8_to_v8i64: @@ -6445,11 +6445,11 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out ; GFX7-HSA-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0 ; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0) ; GFX7-HSA-NEXT: s_lshr_b32 s4, s3, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s6, s3, 8 -; GFX7-HSA-NEXT: s_mov_b32 s8, s3 -; GFX7-HSA-NEXT: s_lshr_b32 s10, s2, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s12, s2, 24 -; GFX7-HSA-NEXT: s_lshr_b32 s14, s2, 8 +; GFX7-HSA-NEXT: s_lshr_b32 s6, s2, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s8, s2, 24 +; GFX7-HSA-NEXT: s_lshr_b32 s10, s2, 8 +; GFX7-HSA-NEXT: s_lshr_b32 s12, s3, 8 +; GFX7-HSA-NEXT: s_mov_b32 s14, s3 ; GFX7-HSA-NEXT: s_ashr_i32 s5, s3, 31 ; GFX7-HSA-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 @@ -6465,32 +6465,32 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out ; GFX7-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s3 ; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s2 -; GFX7-HSA-NEXT: s_add_u32 s2, s0, 32 +; GFX7-HSA-NEXT: s_add_u32 s2, s0, 16 ; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s18 ; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s5 ; GFX7-HSA-NEXT: s_addc_u32 s3, s1, 0 ; 
GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s3 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s2 -; GFX7-HSA-NEXT: s_add_u32 s2, s0, 16 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s8 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s9 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s6 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s7 -; GFX7-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s3 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s10 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s11 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s12 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s13 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s6 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s7 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s8 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s9 ; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s2 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0 +; GFX7-HSA-NEXT: s_add_u32 s0, s0, 32 ; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s16 ; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s17 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s14 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s15 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s10 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s11 +; GFX7-HSA-NEXT: s_addc_u32 s1, s1, 0 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s14 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s15 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s12 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s13 ; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: s_endpgm @@ -6502,11 +6502,11 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out ; GFX8-NOHSA-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0 ; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0) ; GFX8-NOHSA-NEXT: s_lshr_b32 s4, s3, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s6, s3, 8 -; GFX8-NOHSA-NEXT: s_mov_b32 s8, s3 -; GFX8-NOHSA-NEXT: s_lshr_b32 s10, s2, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s2, 24 -; GFX8-NOHSA-NEXT: s_lshr_b32 s14, s2, 8 +; GFX8-NOHSA-NEXT: s_lshr_b32 s6, s2, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s8, s2, 24 +; GFX8-NOHSA-NEXT: s_lshr_b32 s10, s2, 8 +; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s3, 8 +; GFX8-NOHSA-NEXT: s_mov_b32 s14, s3 ; GFX8-NOHSA-NEXT: s_ashr_i32 s5, s3, 31 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 @@ -6522,32 +6522,32 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out ; GFX8-NOHSA-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2 -; GFX8-NOHSA-NEXT: s_add_u32 s2, s0, 32 +; GFX8-NOHSA-NEXT: s_add_u32 s2, s0, 16 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s18 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s5 ; GFX8-NOHSA-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2 -; GFX8-NOHSA-NEXT: s_add_u32 s2, s0, 16 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s8 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s9 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s6 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s7 -; GFX8-NOHSA-NEXT: s_addc_u32 s3, s1, 0 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s10 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s11 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s12 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s13 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s6 +; GFX8-NOHSA-NEXT: 
v_mov_b32_e32 v1, s7 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s8 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s9 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0 +; GFX8-NOHSA-NEXT: s_add_u32 s0, s0, 32 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s16 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s17 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s14 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s15 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s10 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s11 +; GFX8-NOHSA-NEXT: s_addc_u32 s1, s1, 0 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s14 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s15 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s12 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s13 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: s_endpgm @@ -6615,34 +6615,34 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out ; GFX12-NEXT: s_load_b64 s[2:3], s[2:3], 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: s_lshr_b32 s4, s3, 16 -; GFX12-NEXT: s_lshr_b32 s6, s3, 8 -; GFX12-NEXT: s_mov_b32 s8, s3 -; GFX12-NEXT: s_lshr_b32 s10, s2, 16 -; GFX12-NEXT: s_lshr_b32 s12, s2, 24 +; GFX12-NEXT: s_lshr_b32 s6, s2, 16 +; GFX12-NEXT: s_lshr_b32 s8, s2, 24 +; GFX12-NEXT: s_lshr_b32 s10, s2, 8 +; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 +; GFX12-NEXT: s_lshr_b32 s12, s3, 8 +; GFX12-NEXT: s_mov_b32 s14, s3 ; GFX12-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000 ; GFX12-NEXT: s_ashr_i32 s15, s3, 31 ; GFX12-NEXT: s_ashr_i32 s18, s3, 24 -; GFX12-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 -; GFX12-NEXT: s_lshr_b32 s14, s2, 8 -; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GFX12-NEXT: v_dual_mov_b32 v16, 0 :: v_dual_mov_b32 v3, s15 +; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v9, s7 +; GFX12-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v11, s9 +; GFX12-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v7, s11 +; GFX12-NEXT: s_bfe_i64 s[2:3], s[14:15], 0x80000 ; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GFX12-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v5, s17 ; GFX12-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v1, s5 -; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v9, s9 -; GFX12-NEXT: s_bfe_i64 s[2:3], s[14:15], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v11, s7 -; GFX12-NEXT: v_dual_mov_b32 v10, s6 :: v_dual_mov_b32 v13, s11 -; GFX12-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v15, s13 -; GFX12-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v7, s3 -; GFX12-NEXT: v_mov_b32_e32 v6, s2 +; GFX12-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v13, s3 +; GFX12-NEXT: v_dual_mov_b32 v12, s2 :: v_dual_mov_b32 v15, s13 +; GFX12-NEXT: v_mov_b32_e32 v14, s12 ; GFX12-NEXT: s_clause 0x3 -; GFX12-NEXT: global_store_b128 v16, v[0:3], s[0:1] offset:48 -; GFX12-NEXT: global_store_b128 v16, v[8:11], s[0:1] offset:32 -; GFX12-NEXT: global_store_b128 v16, v[12:15], s[0:1] offset:16 +; GFX12-NEXT: global_store_b128 v16, v[8:11], s[0:1] offset:16 ; GFX12-NEXT: global_store_b128 v16, v[4:7], s[0:1] +; GFX12-NEXT: global_store_b128 v16, v[0:3], s[0:1] offset:48 +; 
GFX12-NEXT: global_store_b128 v16, v[12:15], s[0:1] offset:32 ; GFX12-NEXT: s_endpgm %load = load <8 x i8>, ptr addrspace(4) %in %ext = sext <8 x i8> %load to <8 x i64> @@ -7033,80 +7033,81 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o ; GFX6-NOHSA-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NOHSA-NEXT: s_mov_b32 s2, -1 ; GFX6-NOHSA-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s7, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s7, 8 -; GFX6-NOHSA-NEXT: s_mov_b32 s14, s7 -; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s6, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s18, s6, 24 -; GFX6-NOHSA-NEXT: s_lshr_b32 s20, s6, 8 -; GFX6-NOHSA-NEXT: s_lshr_b32 s22, s5, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s24, s5, 8 -; GFX6-NOHSA-NEXT: s_mov_b32 s26, s5 -; GFX6-NOHSA-NEXT: s_lshr_b32 s28, s4, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s4, 24 -; GFX6-NOHSA-NEXT: s_lshr_b32 s34, s4, 8 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[4:5], 0x80000 +; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s6, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s6, 24 +; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s6, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s18, s4, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s20, s4, 24 +; GFX6-NOHSA-NEXT: s_lshr_b32 s22, s4, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s24, s7, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s7, 8 +; GFX6-NOHSA-NEXT: s_mov_b32 s26, s7 +; GFX6-NOHSA-NEXT: s_lshr_b32 s28, s5, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s5, 8 +; GFX6-NOHSA-NEXT: s_mov_b32 s8, s5 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[34:35], s[4:5], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[36:37], s[6:7], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 +; GFX6-NOHSA-NEXT: s_ashr_i32 s29, s5, 31 +; GFX6-NOHSA-NEXT: s_ashr_i32 s31, s5, 24 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 -; GFX6-NOHSA-NEXT: s_ashr_i32 s31, s5, 31 -; GFX6-NOHSA-NEXT: s_ashr_i32 s33, s5, 24 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 -; GFX6-NOHSA-NEXT: s_ashr_i32 s35, s7, 31 +; GFX6-NOHSA-NEXT: s_ashr_i32 s33, s7, 31 ; GFX6-NOHSA-NEXT: s_ashr_i32 s38, s7, 24 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[34:35], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[30:31], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s38 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s35 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s14 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s15 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s36 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s37 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s33 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s31 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s26 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s27 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s11 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s36 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s37 +; 
GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s34 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s35 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s38 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s33 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s26 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s27 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s12 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s13 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s14 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s15 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80 ; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s8 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s9 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s12 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s13 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s31 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s29 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s16 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s17 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64 ; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s18 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s19 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:80 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s20 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s21 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s18 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s19 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s20 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s21 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 ; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s22 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s23 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s24 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s25 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:32 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s28 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s29 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s6 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s7 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s8 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s9 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s22 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s23 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 ; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s24 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s25 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s10 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s11 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96 +; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s6 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s7 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s4 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s5 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 ; GFX6-NOHSA-NEXT: s_endpgm ; ; GFX7-HSA-LABEL: constant_sextload_v16i8_to_v16i64: @@ -7118,31 +7119,33 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o ; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0) ; GFX7-HSA-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0 ; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0) -; 
GFX7-HSA-NEXT: s_lshr_b32 s8, s7, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s10, s7, 8 -; GFX7-HSA-NEXT: s_mov_b32 s12, s7 -; GFX7-HSA-NEXT: s_lshr_b32 s14, s6, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s16, s6, 24 -; GFX7-HSA-NEXT: s_lshr_b32 s18, s6, 8 -; GFX7-HSA-NEXT: s_lshr_b32 s20, s5, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s22, s5, 8 -; GFX7-HSA-NEXT: s_ashr_i32 s29, s5, 31 -; GFX7-HSA-NEXT: s_ashr_i32 s31, s5, 24 -; GFX7-HSA-NEXT: s_mov_b32 s24, s5 -; GFX7-HSA-NEXT: s_lshr_b32 s26, s4, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s28, s4, 24 -; GFX7-HSA-NEXT: s_lshr_b32 s30, s4, 8 -; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[4:5], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[8:9], 0x80000 +; GFX7-HSA-NEXT: s_lshr_b32 s2, s6, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s8, s6, 24 +; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000 +; GFX7-HSA-NEXT: s_lshr_b32 s10, s6, 8 +; GFX7-HSA-NEXT: s_lshr_b32 s12, s4, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s14, s4, 24 +; GFX7-HSA-NEXT: s_lshr_b32 s16, s4, 8 +; GFX7-HSA-NEXT: s_lshr_b32 s18, s7, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s20, s7, 8 +; GFX7-HSA-NEXT: s_ashr_i32 s27, s5, 31 +; GFX7-HSA-NEXT: s_ashr_i32 s29, s5, 24 +; GFX7-HSA-NEXT: s_mov_b32 s22, s7 +; GFX7-HSA-NEXT: s_lshr_b32 s24, s5, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s26, s5, 8 +; GFX7-HSA-NEXT: s_mov_b32 s28, s5 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s2 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[8:9], 0x80000 ; GFX7-HSA-NEXT: s_ashr_i32 s33, s7, 31 -; GFX7-HSA-NEXT: s_ashr_i32 s36, s7, 24 -; GFX7-HSA-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s4 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s5 -; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[8:9], s[26:27], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 +; GFX7-HSA-NEXT: s_ashr_i32 s34, s7, 24 +; GFX7-HSA-NEXT: s_bfe_i64 s[30:31], s[4:5], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s3 +; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[28:29], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[26:27], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[8:9], s[24:25], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 @@ -7150,73 +7153,70 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o ; GFX7-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 -; GFX7-HSA-NEXT: s_add_u32 s26, s0, 0x70 -; GFX7-HSA-NEXT: s_addc_u32 s27, s1, 0 +; GFX7-HSA-NEXT: s_add_u32 s24, s0, 0x50 +; GFX7-HSA-NEXT: s_addc_u32 s25, s1, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-HSA-NEXT: s_add_u32 s6, s0, 64 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7 +; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s24 +; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s7 +; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s25 +; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s6 +; GFX7-HSA-NEXT: s_add_u32 s6, s0, 16 ; GFX7-HSA-NEXT: v_mov_b32_e32 v6, s10 -; GFX7-HSA-NEXT: s_add_u32 s10, s0, 0x60 -; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s26 ; GFX7-HSA-NEXT: v_mov_b32_e32 v7, s11 -; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s10 -; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s27 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s12 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s13 -; GFX7-HSA-NEXT: 
v_mov_b32_e32 v11, s11 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s36 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s33 -; GFX7-HSA-NEXT: s_add_u32 s10, s0, 0x50 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[8:9], v[0:3] ; GFX7-HSA-NEXT: flat_store_dwordx4 v[10:11], v[4:7] -; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s14 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s15 +; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s12 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s13 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s14 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s15 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s30 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s31 ; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s16 ; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s17 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11 -; GFX7-HSA-NEXT: s_add_u32 s10, s0, 64 -; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s34 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s35 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s18 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s19 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11 -; GFX7-HSA-NEXT: s_add_u32 s10, s0, 48 -; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s20 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s21 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s31 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s29 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11 -; GFX7-HSA-NEXT: s_add_u32 s10, s0, 32 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0 +; GFX7-HSA-NEXT: s_add_u32 s6, s0, 0x70 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s24 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s25 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s22 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s23 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11 +; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s18 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s19 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s34 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s33 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7 +; GFX7-HSA-NEXT: s_add_u32 s6, s0, 0x60 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX7-HSA-NEXT: s_nop 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s6 -; GFX7-HSA-NEXT: s_add_u32 s6, s0, 16 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s7 ; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0 ; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7 +; GFX7-HSA-NEXT: s_add_u32 s6, s0, 48 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s22 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s23 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s20 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s21 +; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-HSA-NEXT: s_add_u32 s0, s0, 32 ; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s8 ; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s9 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s29 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s27 ; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7 +; GFX7-HSA-NEXT: s_addc_u32 s1, s1, 0 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GFX7-HSA-NEXT: 
v_mov_b32_e32 v1, s3 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s4 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s5 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s3 ; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: s_endpgm @@ -7225,107 +7225,109 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o ; GFX8-NOHSA: ; %bb.0: ; GFX8-NOHSA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NOHSA-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0 +; GFX8-NOHSA-NEXT: s_load_dwordx4 s[8:11], s[2:3], 0x0 ; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NOHSA-NEXT: s_lshr_b32 s18, s7, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s20, s7, 8 -; GFX8-NOHSA-NEXT: s_mov_b32 s22, s7 -; GFX8-NOHSA-NEXT: s_lshr_b32 s24, s6, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s26, s6, 24 -; GFX8-NOHSA-NEXT: s_lshr_b32 s28, s6, 8 -; GFX8-NOHSA-NEXT: s_lshr_b32 s30, s5, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s16, s5, 8 -; GFX8-NOHSA-NEXT: s_mov_b32 s14, s5 -; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s4, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s10, s4, 24 -; GFX8-NOHSA-NEXT: s_lshr_b32 s8, s4, 8 -; GFX8-NOHSA-NEXT: s_ashr_i32 s19, s5, 31 -; GFX8-NOHSA-NEXT: s_ashr_i32 s31, s5, 24 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[2:3], s[4:5], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 +; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s10, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s14, s10, 24 +; GFX8-NOHSA-NEXT: s_lshr_b32 s16, s10, 8 +; GFX8-NOHSA-NEXT: s_lshr_b32 s18, s8, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s20, s8, 24 +; GFX8-NOHSA-NEXT: s_lshr_b32 s22, s8, 8 +; GFX8-NOHSA-NEXT: s_lshr_b32 s24, s11, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s26, s11, 8 +; GFX8-NOHSA-NEXT: s_mov_b32 s28, s11 +; GFX8-NOHSA-NEXT: s_lshr_b32 s6, s9, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s4, s9, 8 +; GFX8-NOHSA-NEXT: s_mov_b32 s2, s9 +; GFX8-NOHSA-NEXT: s_ashr_i32 s25, s9, 31 +; GFX8-NOHSA-NEXT: s_ashr_i32 s29, s9, 24 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[30:31], s[8:9], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[34:35], s[10:11], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[8:9], s[28:29], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 +; GFX8-NOHSA-NEXT: s_ashr_i32 s28, s11, 31 +; GFX8-NOHSA-NEXT: s_ashr_i32 s33, s11, 24 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[10:11], s[24:25], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 -; GFX8-NOHSA-NEXT: s_ashr_i32 s30, s7, 31 -; GFX8-NOHSA-NEXT: s_ashr_i32 s33, s7, 24 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[6:7], s[18:19], 0x80000 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s6 -; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 0x70 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s7 -; GFX8-NOHSA-NEXT: s_addc_u32 s7, s1, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s33 -; GFX8-NOHSA-NEXT: 
v_mov_b32_e32 v3, s30 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7 -; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 0x60 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s12 +; GFX8-NOHSA-NEXT: s_add_u32 s12, s0, 0x50 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s13 +; GFX8-NOHSA-NEXT: s_addc_u32 s13, s1, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s12 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s14 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s15 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s13 +; GFX8-NOHSA-NEXT: s_add_u32 s12, s0, 64 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_addc_u32 s7, s1, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s22 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s23 +; GFX8-NOHSA-NEXT: s_addc_u32 s13, s1, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s12 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s34 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s35 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s16 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s17 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s13 +; GFX8-NOHSA-NEXT: s_add_u32 s12, s0, 16 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_addc_u32 s13, s1, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s12 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s18 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s19 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s20 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s21 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7 -; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 0x50 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s13 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_addc_u32 s7, s1, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s24 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s25 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s30 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s31 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s22 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s23 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_nop 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s10 +; GFX8-NOHSA-NEXT: s_add_u32 s10, s0, 0x70 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s11 +; GFX8-NOHSA-NEXT: s_addc_u32 s11, s1, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s10 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s33 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s28 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s11 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_nop 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s8 +; GFX8-NOHSA-NEXT: s_add_u32 s8, s0, 0x60 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s9 +; GFX8-NOHSA-NEXT: s_addc_u32 s9, s1, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s8 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s26 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s27 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7 -; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 64 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s9 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_nop 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s6 +; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 48 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s7 ; GFX8-NOHSA-NEXT: s_addc_u32 s7, s1, 0 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s34 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s35 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s28 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s29 +; 
GFX8-NOHSA-NEXT: s_add_u32 s0, s0, 32 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s29 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s25 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_nop 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s4 -; GFX8-NOHSA-NEXT: s_add_u32 s4, s0, 48 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s5 -; GFX8-NOHSA-NEXT: s_addc_u32 s5, s1, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s31 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s19 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5 -; GFX8-NOHSA-NEXT: s_add_u32 s4, s0, 32 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_addc_u32 s5, s1, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s14 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s15 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s16 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s17 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5 -; GFX8-NOHSA-NEXT: s_add_u32 s4, s0, 16 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_addc_u32 s5, s1, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s12 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s13 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s10 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s11 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5 +; GFX8-NOHSA-NEXT: s_addc_u32 s1, s1, 0 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s2 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s3 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s8 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s9 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s5 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: s_endpgm @@ -7435,64 +7437,64 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: s_load_b128 s[4:7], s[2:3], 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_lshr_b32 s8, s7, 16 -; GFX12-NEXT: s_lshr_b32 s10, s7, 8 -; GFX12-NEXT: s_mov_b32 s12, s7 -; GFX12-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000 -; GFX12-NEXT: s_ashr_i32 s33, s7, 31 -; GFX12-NEXT: s_ashr_i32 s36, s7, 24 +; GFX12-NEXT: s_lshr_b32 s2, s6, 16 +; GFX12-NEXT: s_lshr_b32 s8, s6, 24 +; GFX12-NEXT: s_lshr_b32 s10, s6, 8 +; GFX12-NEXT: s_bfe_i64 s[30:31], s[4:5], 0x80000 ; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 -; GFX12-NEXT: s_lshr_b32 s14, s6, 16 -; GFX12-NEXT: s_lshr_b32 s16, s6, 24 -; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000 +; GFX12-NEXT: s_lshr_b32 s12, s4, 16 +; GFX12-NEXT: s_lshr_b32 s14, s4, 24 +; GFX12-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000 ; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v3, s33 -; GFX12-NEXT: s_lshr_b32 s18, s6, 8 -; GFX12-NEXT: v_dual_mov_b32 v2, s36 :: v_dual_mov_b32 v5, s35 -; GFX12-NEXT: v_dual_mov_b32 v4, s34 :: v_dual_mov_b32 v1, s9 -; GFX12-NEXT: v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v9, s13 -; GFX12-NEXT: s_lshr_b32 s20, s5, 16 -; GFX12-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s35 +; GFX12-NEXT: s_lshr_b32 s16, s4, 8 +; GFX12-NEXT: v_dual_mov_b32 v4, s30 :: v_dual_mov_b32 v9, s3 +; GFX12-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v11, s9 +; GFX12-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v3, s11 +; GFX12-NEXT: 
s_lshr_b32 s18, s7, 16 ; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v8, s12 :: v_dual_mov_b32 v11, s11 -; GFX12-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v13, s15 -; GFX12-NEXT: s_lshr_b32 s22, s5, 8 -; GFX12-NEXT: s_mov_b32 s24, s5 +; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v0, s34 :: v_dual_mov_b32 v5, s31 +; GFX12-NEXT: v_dual_mov_b32 v2, s10 :: v_dual_mov_b32 v13, s13 +; GFX12-NEXT: s_lshr_b32 s20, s7, 8 +; GFX12-NEXT: s_mov_b32 s22, s7 +; GFX12-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 +; GFX12-NEXT: s_lshr_b32 s24, s5, 16 +; GFX12-NEXT: s_ashr_i32 s33, s7, 31 +; GFX12-NEXT: s_ashr_i32 s36, s7, 24 ; GFX12-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 -; GFX12-NEXT: s_lshr_b32 s26, s4, 16 -; GFX12-NEXT: s_lshr_b32 s28, s4, 24 -; GFX12-NEXT: s_ashr_i32 s29, s5, 31 -; GFX12-NEXT: s_ashr_i32 s31, s5, 24 +; GFX12-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v15, s15 +; GFX12-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v7, s17 +; GFX12-NEXT: s_lshr_b32 s26, s5, 8 +; GFX12-NEXT: s_mov_b32 s28, s5 +; GFX12-NEXT: s_ashr_i32 s27, s5, 31 +; GFX12-NEXT: s_ashr_i32 s29, s5, 24 +; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GFX12-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v15, s17 -; GFX12-NEXT: v_dual_mov_b32 v14, s16 :: v_dual_mov_b32 v7, s19 -; GFX12-NEXT: s_lshr_b32 s30, s4, 8 +; GFX12-NEXT: v_mov_b32_e32 v6, s16 ; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 -; GFX12-NEXT: v_mov_b32_e32 v6, s18 -; GFX12-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 ; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: global_store_b128 v24, v[8:11], s[0:1] offset:80 +; GFX12-NEXT: global_store_b128 v24, v[0:3], s[0:1] offset:64 +; GFX12-NEXT: v_dual_mov_b32 v0, s18 :: v_dual_mov_b32 v3, s33 +; GFX12-NEXT: v_dual_mov_b32 v1, s19 :: v_dual_mov_b32 v2, s36 +; GFX12-NEXT: v_mov_b32_e32 v9, s23 +; GFX12-NEXT: s_bfe_i64 s[4:5], s[28:29], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[6:7], s[26:27], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v11, s21 +; GFX12-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v17, s25 +; GFX12-NEXT: v_dual_mov_b32 v16, s24 :: v_dual_mov_b32 v19, s27 +; GFX12-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v21, s5 +; GFX12-NEXT: v_dual_mov_b32 v20, s4 :: v_dual_mov_b32 v23, s7 +; GFX12-NEXT: v_mov_b32_e32 v22, s6 +; GFX12-NEXT: s_clause 0x5 +; GFX12-NEXT: global_store_b128 v24, v[12:15], s[0:1] offset:16 +; GFX12-NEXT: global_store_b128 v24, v[4:7], s[0:1] ; GFX12-NEXT: global_store_b128 v24, v[0:3], s[0:1] offset:112 ; GFX12-NEXT: global_store_b128 v24, v[8:11], s[0:1] offset:96 -; GFX12-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v3, s29 -; GFX12-NEXT: v_dual_mov_b32 v1, s21 :: v_dual_mov_b32 v2, s31 -; GFX12-NEXT: v_mov_b32_e32 v9, s25 -; GFX12-NEXT: s_bfe_i64 s[2:3], s[4:5], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v11, s23 -; GFX12-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v17, s27 -; GFX12-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v19, s7 -; GFX12-NEXT: v_dual_mov_b32 v18, s6 :: v_dual_mov_b32 v21, s3 -; GFX12-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v23, s5 -; GFX12-NEXT: v_mov_b32_e32 v22, s4 -; GFX12-NEXT: s_clause 0x5 -; GFX12-NEXT: global_store_b128 v24, v[12:15], s[0:1] offset:80 -; GFX12-NEXT: global_store_b128 v24, v[4:7], 
s[0:1] offset:64 -; GFX12-NEXT: global_store_b128 v24, v[0:3], s[0:1] offset:48 -; GFX12-NEXT: global_store_b128 v24, v[8:11], s[0:1] offset:32 -; GFX12-NEXT: global_store_b128 v24, v[16:19], s[0:1] offset:16 -; GFX12-NEXT: global_store_b128 v24, v[20:23], s[0:1] +; GFX12-NEXT: global_store_b128 v24, v[16:19], s[0:1] offset:48 +; GFX12-NEXT: global_store_b128 v24, v[20:23], s[0:1] offset:32 ; GFX12-NEXT: s_endpgm %load = load <16 x i8>, ptr addrspace(4) %in %ext = sext <16 x i8> %load to <16 x i64> @@ -8204,157 +8206,157 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o ; GFX6-NOHSA-NEXT: s_waitcnt lgkmcnt(0) ; GFX6-NOHSA-NEXT: s_load_dwordx8 s[0:7], s[10:11], 0x0 ; GFX6-NOHSA-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s7, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s22, s7, 8 -; GFX6-NOHSA-NEXT: s_mov_b32 s30, s7 ; GFX6-NOHSA-NEXT: s_lshr_b32 s28, s6, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s24, s6, 24 -; GFX6-NOHSA-NEXT: s_lshr_b32 s26, s6, 8 -; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s5, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s5, 8 -; GFX6-NOHSA-NEXT: s_mov_b32 s34, s5 -; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s4, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s18, s4, 24 -; GFX6-NOHSA-NEXT: s_lshr_b32 s20, s4, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s6, 24 +; GFX6-NOHSA-NEXT: s_lshr_b32 s20, s6, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s26, s4, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s22, s4, 24 +; GFX6-NOHSA-NEXT: s_lshr_b32 s24, s4, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s2, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s2, 24 +; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s2, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s0, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s18, s0, 24 +; GFX6-NOHSA-NEXT: s_mov_b32 s34, s7 ; GFX6-NOHSA-NEXT: s_ashr_i32 s11, s1, 31 ; GFX6-NOHSA-NEXT: s_ashr_i32 s13, s1, 24 -; GFX6-NOHSA-NEXT: s_ashr_i32 s17, s3, 31 -; GFX6-NOHSA-NEXT: s_ashr_i32 s19, s3, 24 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[56:57], s[34:35], 0x80000 -; GFX6-NOHSA-NEXT: s_ashr_i32 s21, s5, 31 -; GFX6-NOHSA-NEXT: s_ashr_i32 s23, s5, 24 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[58:59], s[30:31], 0x80000 -; GFX6-NOHSA-NEXT: s_ashr_i32 s25, s7, 31 -; GFX6-NOHSA-NEXT: s_ashr_i32 s27, s7, 24 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[60:61], s[14:15], 0x80000 -; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s3, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s34, s3, 8 -; GFX6-NOHSA-NEXT: s_mov_b32 s44, s3 -; GFX6-NOHSA-NEXT: s_lshr_b32 s36, s2, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s38, s2, 24 -; GFX6-NOHSA-NEXT: s_lshr_b32 s40, s2, 8 -; GFX6-NOHSA-NEXT: s_lshr_b32 s42, s1, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s46, s1, 8 -; GFX6-NOHSA-NEXT: s_mov_b32 s52, s1 -; GFX6-NOHSA-NEXT: s_lshr_b32 s48, s0, 16 -; GFX6-NOHSA-NEXT: s_lshr_b32 s50, s0, 24 -; GFX6-NOHSA-NEXT: s_lshr_b32 s54, s0, 8 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[14:15], s[0:1], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[62:63], s[4:5], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[2:3], 0x80000 +; GFX6-NOHSA-NEXT: s_ashr_i32 s15, s3, 31 +; GFX6-NOHSA-NEXT: s_ashr_i32 s17, s3, 24 +; GFX6-NOHSA-NEXT: s_ashr_i32 s33, s5, 31 +; GFX6-NOHSA-NEXT: s_ashr_i32 s49, s5, 24 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[38:39], s[34:35], 0x80000 +; GFX6-NOHSA-NEXT: s_ashr_i32 s19, s7, 31 +; GFX6-NOHSA-NEXT: s_ashr_i32 s21, s7, 24 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[52:53], s[30:31], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[54:55], s[28:29], 0x80000 +; GFX6-NOHSA-NEXT: s_lshr_b32 s28, s0, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s7, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s34, s7, 8 +; GFX6-NOHSA-NEXT: s_lshr_b32 s36, s5, 16 
+; GFX6-NOHSA-NEXT: s_lshr_b32 s40, s5, 8 +; GFX6-NOHSA-NEXT: s_mov_b32 s46, s5 +; GFX6-NOHSA-NEXT: s_lshr_b32 s42, s3, 16 +; GFX6-NOHSA-NEXT: s_lshr_b32 s44, s3, 8 +; GFX6-NOHSA-NEXT: s_mov_b32 s50, s3 +; GFX6-NOHSA-NEXT: s_lshr_b32 s48, s1, 16 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[56:57], s[0:1], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[58:59], s[4:5], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[60:61], s[6:7], 0x80000 +; GFX6-NOHSA-NEXT: s_lshr_b32 s4, s1, 8 +; GFX6-NOHSA-NEXT: s_mov_b32 s6, s1 ; GFX6-NOHSA-NEXT: s_mov_b32 s0, s8 ; GFX6-NOHSA-NEXT: s_mov_b32 s1, s9 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s58 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s59 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s6 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s7 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s56 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s57 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s62 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s63 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s60 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s61 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s27 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s25 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v18, s23 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v19, s21 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v20, s19 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v21, s17 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s60 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s61 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s58 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s59 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s2 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s3 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s56 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s57 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s38 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s39 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v18, s54 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v19, s55 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v20, s52 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v21, s53 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v22, s21 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v23, s19 ; GFX6-NOHSA-NEXT: s_mov_b32 s3, 0xf000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000 -; GFX6-NOHSA-NEXT: s_mov_b32 s2, -1 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v22, s6 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:240 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[26:27], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 +; GFX6-NOHSA-NEXT: s_mov_b32 s2, -1 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v24, s8 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:208 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s22 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s23 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v23, s7 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v24, s24 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v25, s25 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s8 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s9 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:208 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[54:55], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[50:51], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[48:49], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s20 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s21 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v25, s9 +; GFX6-NOHSA-NEXT: 
buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v26, s22 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v27, s23 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s24 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s25 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:144 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[20:21], s[6:7], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[50:51], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[24:25], s[46:47], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[48:49], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[44:45], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[26:27], s[42:43], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[28:29], s[40:41], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[38:39], s[40:41], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 ; GFX6-NOHSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[40:41], s[12:13], 0x80000 -; GFX6-NOHSA-NEXT: s_bfe_i64 s[42:43], s[10:11], 0x80000 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[40:41], s[16:17], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[42:43], s[14:15], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[44:45], s[12:13], 0x80000 +; GFX6-NOHSA-NEXT: s_bfe_i64 s[46:47], s[10:11], 0x80000 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:128 ; GFX6-NOHSA-NEXT: s_waitcnt expcnt(2) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s44 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s45 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s42 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s43 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:176 -; GFX6-NOHSA-NEXT: s_waitcnt expcnt(1) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s4 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s5 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s40 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s41 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s16 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s17 -; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s18 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s19 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:144 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s46 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s47 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s44 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s45 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80 ; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s13 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s11 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s20 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s21 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s52 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s53 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v18, s30 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v19, s31 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:112 -; GFX6-NOHSA-NEXT: s_waitcnt expcnt(1) -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s14 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s15 -; GFX6-NOHSA-NEXT: 
v_mov_b32_e32 v2, s34 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s35 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s49 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s33 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s42 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s43 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s40 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s41 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s18 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s19 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16 ; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s24 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s25 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s28 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s29 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s17 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s15 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v20, s30 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v21, s31 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:240 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s22 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s23 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v18, s34 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v19, s35 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:224 +; GFX6-NOHSA-NEXT: s_waitcnt expcnt(2) +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s13 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s11 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s36 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s37 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s38 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s39 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s28 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s29 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176 +; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s20 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s21 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s38 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s39 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:160 ; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s26 ; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s27 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:48 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s24 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s25 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:32 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s22 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s23 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s8 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s9 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s6 -; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s7 -; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s8 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s9 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96 +; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0) +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s6 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s7 +; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s4 +; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s5 +; 
GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 ; GFX6-NOHSA-NEXT: s_endpgm ; ; GFX7-HSA-LABEL: constant_sextload_v32i8_to_v32i64: @@ -8366,211 +8368,212 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o ; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0) ; GFX7-HSA-NEXT: s_load_dwordx8 s[0:7], s[10:11], 0x0 ; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-HSA-NEXT: s_lshr_b32 s10, s7, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s40, s7, 8 -; GFX7-HSA-NEXT: s_mov_b32 s42, s7 -; GFX7-HSA-NEXT: s_lshr_b32 s44, s6, 16 -; GFX7-HSA-NEXT: s_ashr_i32 s41, s1, 24 -; GFX7-HSA-NEXT: s_ashr_i32 s43, s3, 31 -; GFX7-HSA-NEXT: s_ashr_i32 s45, s3, 24 -; GFX7-HSA-NEXT: s_lshr_b32 s48, s6, 24 -; GFX7-HSA-NEXT: s_lshr_b32 s50, s6, 8 -; GFX7-HSA-NEXT: s_lshr_b32 s52, s5, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s46, s5, 8 -; GFX7-HSA-NEXT: s_mov_b32 s54, s5 -; GFX7-HSA-NEXT: s_lshr_b32 s38, s4, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s36, s4, 24 -; GFX7-HSA-NEXT: s_lshr_b32 s34, s4, 8 -; GFX7-HSA-NEXT: s_lshr_b32 s28, s3, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s24, s3, 8 -; GFX7-HSA-NEXT: s_mov_b32 s26, s3 -; GFX7-HSA-NEXT: s_lshr_b32 s22, s2, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s20, s2, 24 -; GFX7-HSA-NEXT: s_lshr_b32 s18, s2, 8 -; GFX7-HSA-NEXT: s_lshr_b32 s14, s1, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s56, s1, 8 -; GFX7-HSA-NEXT: s_mov_b32 s12, s1 -; GFX7-HSA-NEXT: s_lshr_b32 s58, s0, 16 -; GFX7-HSA-NEXT: s_lshr_b32 s60, s0, 24 -; GFX7-HSA-NEXT: s_lshr_b32 s62, s0, 8 -; GFX7-HSA-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[10:11], 0x80000 +; GFX7-HSA-NEXT: s_lshr_b32 s12, s6, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s10, s6, 24 ; GFX7-HSA-NEXT: s_ashr_i32 s33, s1, 31 -; GFX7-HSA-NEXT: s_ashr_i32 s66, s5, 31 -; GFX7-HSA-NEXT: s_ashr_i32 s67, s5, 24 -; GFX7-HSA-NEXT: s_ashr_i32 s68, s7, 31 -; GFX7-HSA-NEXT: s_ashr_i32 s69, s7, 24 -; GFX7-HSA-NEXT: s_bfe_i64 s[0:1], s[0:1], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[30:31], s[4:5], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[64:65], s[6:7], 0x80000 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[62:63], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[60:61], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[6:7], s[58:59], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[10:11], s[56:57], 0x80000 +; GFX7-HSA-NEXT: s_ashr_i32 s37, s1, 24 +; GFX7-HSA-NEXT: s_lshr_b32 s34, s0, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s30, s0, 24 +; GFX7-HSA-NEXT: s_lshr_b32 s28, s0, 8 +; GFX7-HSA-NEXT: s_lshr_b32 s64, s1, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s66, s1, 8 +; GFX7-HSA-NEXT: s_mov_b32 s68, s1 +; GFX7-HSA-NEXT: s_bfe_i64 s[22:23], s[0:1], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[0:1], s[12:13], 0x80000 +; GFX7-HSA-NEXT: s_lshr_b32 s36, s6, 8 +; GFX7-HSA-NEXT: s_lshr_b32 s40, s4, 16 +; GFX7-HSA-NEXT: s_ashr_i32 s41, s3, 31 +; GFX7-HSA-NEXT: s_lshr_b32 s50, s4, 24 +; GFX7-HSA-NEXT: s_lshr_b32 s52, s4, 8 +; GFX7-HSA-NEXT: s_lshr_b32 s54, s2, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s56, s2, 24 +; GFX7-HSA-NEXT: s_lshr_b32 s42, s2, 8 +; GFX7-HSA-NEXT: s_lshr_b32 s26, s7, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s20, s7, 8 +; GFX7-HSA-NEXT: s_mov_b32 s24, s7 +; GFX7-HSA-NEXT: s_lshr_b32 s18, s5, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s14, s5, 8 +; GFX7-HSA-NEXT: s_mov_b32 s16, s5 +; GFX7-HSA-NEXT: s_lshr_b32 s58, s3, 16 +; GFX7-HSA-NEXT: s_lshr_b32 s60, s3, 8 +; GFX7-HSA-NEXT: s_mov_b32 s62, s3 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-HSA-NEXT: s_bfe_i64 s[0:1], 
s[10:11], 0x80000 +; GFX7-HSA-NEXT: s_ashr_i32 s44, s3, 24 +; GFX7-HSA-NEXT: s_ashr_i32 s45, s5, 31 +; GFX7-HSA-NEXT: s_ashr_i32 s46, s5, 24 +; GFX7-HSA-NEXT: s_ashr_i32 s47, s7, 31 +; GFX7-HSA-NEXT: s_ashr_i32 s48, s7, 24 +; GFX7-HSA-NEXT: s_bfe_i64 s[38:39], s[2:3], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[70:71], s[4:5], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[72:73], s[6:7], 0x80000 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s1 +; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[68:69], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[0:1], s[66:67], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[64:65], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[10:11], s[62:63], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[6:7], s[60:61], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[12:13], s[58:59], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000 ; GFX7-HSA-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[56:57], s[44:45], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[58:59], s[42:43], 0x80000 -; GFX7-HSA-NEXT: s_bfe_i64 s[60:61], s[40:41], 0x80000 -; GFX7-HSA-NEXT: s_add_u32 s62, s8, 0xf0 +; GFX7-HSA-NEXT: s_bfe_i64 s[58:59], s[40:41], 0x80000 +; GFX7-HSA-NEXT: s_bfe_i64 s[60:61], s[36:37], 0x80000 +; GFX7-HSA-NEXT: s_add_u32 s62, s8, 0xd0 ; GFX7-HSA-NEXT: s_addc_u32 s63, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s58 -; GFX7-HSA-NEXT: s_add_u32 s58, s8, 0xe0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s59 -; GFX7-HSA-NEXT: s_addc_u32 s59, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s48 -; GFX7-HSA-NEXT: s_add_u32 s48, s8, 0xd0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s49 -; GFX7-HSA-NEXT: s_addc_u32 s49, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v26, s48 -; GFX7-HSA-NEXT: v_mov_b32_e32 v27, s49 -; GFX7-HSA-NEXT: s_add_u32 s48, s8, 0xc0 -; GFX7-HSA-NEXT: s_addc_u32 s49, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v28, s48 -; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s62 -; GFX7-HSA-NEXT: v_mov_b32_e32 v29, s49 -; GFX7-HSA-NEXT: s_add_u32 s48, s8, 0xb0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s63 -; GFX7-HSA-NEXT: s_addc_u32 s49, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s69 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s68 -; GFX7-HSA-NEXT: v_mov_b32_e32 v24, s58 -; GFX7-HSA-NEXT: flat_store_dwordx4 v[18:19], v[0:3] ; GFX7-HSA-NEXT: v_mov_b32_e32 v6, s60 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s38 -; GFX7-HSA-NEXT: s_add_u32 s38, s8, 0xa0 +; GFX7-HSA-NEXT: s_add_u32 s60, s8, 0xc0 ; GFX7-HSA-NEXT: v_mov_b32_e32 v7, s61 -; GFX7-HSA-NEXT: v_mov_b32_e32 v25, s59 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s39 +; GFX7-HSA-NEXT: s_addc_u32 s61, s9, 0 +; 
GFX7-HSA-NEXT: v_mov_b32_e32 v10, s50 +; GFX7-HSA-NEXT: s_add_u32 s50, s8, 0x90 +; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s51 +; GFX7-HSA-NEXT: s_addc_u32 s51, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v26, s50 +; GFX7-HSA-NEXT: v_mov_b32_e32 v27, s51 +; GFX7-HSA-NEXT: s_add_u32 s50, s8, 0x80 +; GFX7-HSA-NEXT: s_addc_u32 s51, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v22, s62 +; GFX7-HSA-NEXT: v_mov_b32_e32 v20, s38 +; GFX7-HSA-NEXT: s_add_u32 s38, s8, 0x50 +; GFX7-HSA-NEXT: v_mov_b32_e32 v23, s63 +; GFX7-HSA-NEXT: v_mov_b32_e32 v21, s39 ; GFX7-HSA-NEXT: s_addc_u32 s39, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v24, s60 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[22:23], v[0:3] +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s72 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s34 +; GFX7-HSA-NEXT: s_add_u32 s34, s8, 64 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s73 +; GFX7-HSA-NEXT: v_mov_b32_e32 v25, s61 +; GFX7-HSA-NEXT: v_mov_b32_e32 v30, s38 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s35 +; GFX7-HSA-NEXT: s_addc_u32 s35, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s58 +; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s59 +; GFX7-HSA-NEXT: v_mov_b32_e32 v16, s54 +; GFX7-HSA-NEXT: v_mov_b32_e32 v17, s55 +; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s56 +; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s57 +; GFX7-HSA-NEXT: v_mov_b32_e32 v31, s39 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[24:25], v[4:7] -; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s56 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s30 -; GFX7-HSA-NEXT: s_add_u32 s30, s8, 0x90 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s31 -; GFX7-HSA-NEXT: s_addc_u32 s31, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v24, s30 -; GFX7-HSA-NEXT: v_mov_b32_e32 v25, s31 -; GFX7-HSA-NEXT: s_add_u32 s30, s8, 0x80 -; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s57 -; GFX7-HSA-NEXT: s_addc_u32 s31, s9, 0 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11] -; GFX7-HSA-NEXT: v_mov_b32_e32 v12, s64 -; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s28 -; GFX7-HSA-NEXT: s_add_u32 s28, s8, 0x70 -; GFX7-HSA-NEXT: v_mov_b32_e32 v13, s65 -; GFX7-HSA-NEXT: v_mov_b32_e32 v14, s50 -; GFX7-HSA-NEXT: v_mov_b32_e32 v15, s51 -; GFX7-HSA-NEXT: v_mov_b32_e32 v30, s48 -; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s29 -; GFX7-HSA-NEXT: s_addc_u32 s29, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v16, s52 -; GFX7-HSA-NEXT: v_mov_b32_e32 v17, s53 -; GFX7-HSA-NEXT: v_mov_b32_e32 v31, s49 -; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s67 -; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s66 -; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s38 -; GFX7-HSA-NEXT: flat_store_dwordx4 v[28:29], v[12:15] -; GFX7-HSA-NEXT: v_mov_b32_e32 v20, s54 -; GFX7-HSA-NEXT: v_mov_b32_e32 v14, s24 -; GFX7-HSA-NEXT: s_add_u32 s24, s8, 0x60 -; GFX7-HSA-NEXT: v_mov_b32_e32 v21, s55 -; GFX7-HSA-NEXT: v_mov_b32_e32 v22, s46 -; GFX7-HSA-NEXT: v_mov_b32_e32 v23, s47 -; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s39 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s36 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s37 -; GFX7-HSA-NEXT: v_mov_b32_e32 v26, s30 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s22 +; GFX7-HSA-NEXT: s_add_u32 s22, s8, 16 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s23 +; GFX7-HSA-NEXT: s_addc_u32 s23, s9, 0 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[30:31], v[16:19] -; GFX7-HSA-NEXT: v_mov_b32_e32 v15, s25 -; GFX7-HSA-NEXT: v_mov_b32_e32 v16, s28 -; GFX7-HSA-NEXT: s_addc_u32 s25, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s24 -; GFX7-HSA-NEXT: v_mov_b32_e32 v27, s31 -; GFX7-HSA-NEXT: v_mov_b32_e32 v6, s34 -; GFX7-HSA-NEXT: v_mov_b32_e32 v7, s35 -; GFX7-HSA-NEXT: flat_store_dwordx4 v[10:11], v[20:23] -; GFX7-HSA-NEXT: v_mov_b32_e32 v12, s26 -; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s45 -; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s43 -; 
GFX7-HSA-NEXT: v_mov_b32_e32 v13, s27 -; GFX7-HSA-NEXT: v_mov_b32_e32 v17, s29 -; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s25 -; GFX7-HSA-NEXT: flat_store_dwordx4 v[24:25], v[0:3] -; GFX7-HSA-NEXT: flat_store_dwordx4 v[26:27], v[4:7] -; GFX7-HSA-NEXT: flat_store_dwordx4 v[16:17], v[8:11] -; GFX7-HSA-NEXT: flat_store_dwordx4 v[18:19], v[12:15] -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s20 -; GFX7-HSA-NEXT: s_add_u32 s20, s8, 0x50 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s21 -; GFX7-HSA-NEXT: s_addc_u32 s21, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s20 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s22 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s23 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s21 -; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX7-HSA-NEXT: s_nop 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s16 -; GFX7-HSA-NEXT: s_add_u32 s16, s8, 64 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s17 -; GFX7-HSA-NEXT: s_addc_u32 s17, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s16 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s18 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s19 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s17 +; GFX7-HSA-NEXT: v_mov_b32_e32 v24, s34 +; GFX7-HSA-NEXT: v_mov_b32_e32 v16, s22 +; GFX7-HSA-NEXT: v_mov_b32_e32 v17, s23 +; GFX7-HSA-NEXT: s_add_u32 s22, s8, 0xf0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s30 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s31 +; GFX7-HSA-NEXT: s_addc_u32 s23, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s22 +; GFX7-HSA-NEXT: v_mov_b32_e32 v28, s50 +; GFX7-HSA-NEXT: v_mov_b32_e32 v22, s42 +; GFX7-HSA-NEXT: v_mov_b32_e32 v23, s43 +; GFX7-HSA-NEXT: v_mov_b32_e32 v25, s35 +; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s23 +; GFX7-HSA-NEXT: s_add_u32 s22, s8, 0xe0 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3] +; GFX7-HSA-NEXT: v_mov_b32_e32 v12, s70 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s8 +; GFX7-HSA-NEXT: v_mov_b32_e32 v13, s71 +; GFX7-HSA-NEXT: v_mov_b32_e32 v14, s52 +; GFX7-HSA-NEXT: v_mov_b32_e32 v15, s53 +; GFX7-HSA-NEXT: v_mov_b32_e32 v29, s51 +; GFX7-HSA-NEXT: v_mov_b32_e32 v6, s28 +; GFX7-HSA-NEXT: v_mov_b32_e32 v7, s29 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[24:25], v[20:23] +; GFX7-HSA-NEXT: s_addc_u32 s23, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v20, s22 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s9 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[28:29], v[12:15] +; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s26 +; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s27 +; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s48 +; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s47 +; GFX7-HSA-NEXT: v_mov_b32_e32 v12, s24 +; GFX7-HSA-NEXT: v_mov_b32_e32 v13, s25 +; GFX7-HSA-NEXT: v_mov_b32_e32 v21, s23 +; GFX7-HSA-NEXT: v_mov_b32_e32 v14, s20 +; GFX7-HSA-NEXT: v_mov_b32_e32 v15, s21 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[0:1], v[4:7] +; GFX7-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11] +; GFX7-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15] +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s18 +; GFX7-HSA-NEXT: s_add_u32 s18, s8, 0xb0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s19 +; GFX7-HSA-NEXT: s_addc_u32 s19, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s18 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s46 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s45 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s19 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: s_nop 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s14 -; GFX7-HSA-NEXT: s_add_u32 s14, s8, 48 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s15 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s14 +; GFX7-HSA-NEXT: s_add_u32 s14, s8, 0xa0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s15 ; GFX7-HSA-NEXT: s_addc_u32 s15, s9, 0 ; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s14 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s41 -; GFX7-HSA-NEXT: 
v_mov_b32_e32 v3, s33 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s16 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s17 ; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s15 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: s_nop 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s10 -; GFX7-HSA-NEXT: s_add_u32 s10, s8, 32 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s11 -; GFX7-HSA-NEXT: s_addc_u32 s11, s9, 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10 ; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s12 +; GFX7-HSA-NEXT: s_add_u32 s12, s8, 0x70 ; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s13 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11 +; GFX7-HSA-NEXT: s_addc_u32 s13, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s12 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s44 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s41 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s13 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: s_nop 0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s4 -; GFX7-HSA-NEXT: s_add_u32 s4, s8, 16 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s5 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-HSA-NEXT: s_add_u32 s6, s8, 0x60 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s7 +; GFX7-HSA-NEXT: s_addc_u32 s7, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s10 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s11 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7 +; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX7-HSA-NEXT: s_nop 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-HSA-NEXT: s_add_u32 s4, s8, 48 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s5 ; GFX7-HSA-NEXT: s_addc_u32 s5, s9, 0 ; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s4 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s6 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s7 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s37 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s33 ; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s5 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s8 -; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s0 -; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s1 -; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s2 -; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s3 -; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s9 +; GFX7-HSA-NEXT: s_nop 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s0 +; GFX7-HSA-NEXT: s_add_u32 s0, s8, 32 +; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s1 +; GFX7-HSA-NEXT: s_addc_u32 s1, s9, 0 +; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1 +; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s2 +; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0 ; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX7-HSA-NEXT: s_endpgm ; @@ -8580,140 +8583,175 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o ; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0) ; GFX8-NOHSA-NEXT: s_load_dwordx8 s[0:7], s[10:11], 0x0 ; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NOHSA-NEXT: s_lshr_b32 s50, s7, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s52, s7, 8 -; GFX8-NOHSA-NEXT: s_mov_b32 s54, s7 -; GFX8-NOHSA-NEXT: s_lshr_b32 s56, s6, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s58, s6, 24 -; GFX8-NOHSA-NEXT: s_lshr_b32 s60, s6, 8 -; GFX8-NOHSA-NEXT: s_lshr_b32 s62, s5, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s48, s5, 8 -; GFX8-NOHSA-NEXT: s_mov_b32 s46, s5 -; GFX8-NOHSA-NEXT: s_lshr_b32 s42, s4, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s40, s4, 24 -; GFX8-NOHSA-NEXT: s_lshr_b32 s38, s4, 8 -; GFX8-NOHSA-NEXT: s_lshr_b32 s64, s3, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s34, s3, 8 -; GFX8-NOHSA-NEXT: s_mov_b32 s30, s3 -; GFX8-NOHSA-NEXT: s_lshr_b32 s28, s2, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s26, s2, 24 -; GFX8-NOHSA-NEXT: s_lshr_b32 s24, s2, 8 -; GFX8-NOHSA-NEXT: s_lshr_b32 s66, s1, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s20, s1, 8 -; 
GFX8-NOHSA-NEXT: s_mov_b32 s18, s1 -; GFX8-NOHSA-NEXT: s_lshr_b32 s16, s0, 16 -; GFX8-NOHSA-NEXT: s_lshr_b32 s14, s0, 24 -; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s0, 8 -; GFX8-NOHSA-NEXT: s_ashr_i32 s65, s3, 24 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[10:11], s[0:1], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[22:23], s[2:3], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[36:37], s[4:5], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[44:45], s[6:7], 0x80000 +; GFX8-NOHSA-NEXT: s_lshr_b32 s46, s6, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s48, s6, 24 +; GFX8-NOHSA-NEXT: s_lshr_b32 s50, s6, 8 +; GFX8-NOHSA-NEXT: s_lshr_b32 s52, s4, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s54, s4, 24 +; GFX8-NOHSA-NEXT: s_lshr_b32 s56, s4, 8 +; GFX8-NOHSA-NEXT: s_lshr_b32 s58, s2, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s60, s2, 24 +; GFX8-NOHSA-NEXT: s_lshr_b32 s40, s2, 8 +; GFX8-NOHSA-NEXT: s_lshr_b32 s36, s0, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s34, s0, 24 +; GFX8-NOHSA-NEXT: s_lshr_b32 s28, s0, 8 +; GFX8-NOHSA-NEXT: s_lshr_b32 s62, s7, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s24, s7, 8 +; GFX8-NOHSA-NEXT: s_mov_b32 s22, s7 +; GFX8-NOHSA-NEXT: s_lshr_b32 s64, s5, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s20, s5, 8 +; GFX8-NOHSA-NEXT: s_mov_b32 s18, s5 +; GFX8-NOHSA-NEXT: s_lshr_b32 s66, s3, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s16, s3, 8 +; GFX8-NOHSA-NEXT: s_mov_b32 s14, s3 +; GFX8-NOHSA-NEXT: s_lshr_b32 s44, s1, 16 +; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s1, 8 +; GFX8-NOHSA-NEXT: s_mov_b32 s10, s1 +; GFX8-NOHSA-NEXT: s_ashr_i32 s63, s5, 24 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[26:27], s[0:1], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[30:31], s[2:3], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[38:39], s[4:5], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[68:69], s[6:7], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GFX8-NOHSA-NEXT: s_ashr_i32 s33, s1, 31 +; GFX8-NOHSA-NEXT: s_ashr_i32 s42, s1, 24 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[0:1], s[44:45], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 +; GFX8-NOHSA-NEXT: s_ashr_i32 s43, s3, 31 +; GFX8-NOHSA-NEXT: s_ashr_i32 s44, s3, 24 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[2:3], s[66:67], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 -; GFX8-NOHSA-NEXT: s_ashr_i32 s4, s1, 31 -; GFX8-NOHSA-NEXT: s_ashr_i32 s6, s1, 24 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[0:1], s[66:67], 0x80000 +; GFX8-NOHSA-NEXT: s_ashr_i32 s45, s5, 31 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[4:5], s[64:65], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 +; GFX8-NOHSA-NEXT: s_ashr_i32 s64, s7, 31 +; GFX8-NOHSA-NEXT: s_ashr_i32 s65, s7, 24 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[6:7], s[62:63], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000 -; GFX8-NOHSA-NEXT: s_ashr_i32 s33, s3, 31 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[2:3], s[64:65], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000 -; GFX8-NOHSA-NEXT: s_ashr_i32 s64, s5, 31 -; 
GFX8-NOHSA-NEXT: s_ashr_i32 s5, s5, 24 -; GFX8-NOHSA-NEXT: s_bfe_i64 s[62:63], s[62:63], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[60:61], s[60:61], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[58:59], s[58:59], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000 -; GFX8-NOHSA-NEXT: s_ashr_i32 s66, s7, 31 -; GFX8-NOHSA-NEXT: s_ashr_i32 s7, s7, 24 ; GFX8-NOHSA-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s50 -; GFX8-NOHSA-NEXT: s_add_u32 s50, s8, 0xf0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s51 -; GFX8-NOHSA-NEXT: s_addc_u32 s51, s9, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s50 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s7 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s66 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s51 -; GFX8-NOHSA-NEXT: s_add_u32 s50, s8, 0xe0 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_addc_u32 s51, s9, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s50 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s54 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s55 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s52 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s53 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s51 -; GFX8-NOHSA-NEXT: s_add_u32 s50, s8, 0xd0 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000 +; GFX8-NOHSA-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s46 +; GFX8-NOHSA-NEXT: s_add_u32 s46, s8, 0xd0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s47 +; GFX8-NOHSA-NEXT: s_addc_u32 s47, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s46 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s48 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s49 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s47 +; GFX8-NOHSA-NEXT: s_add_u32 s46, s8, 0xc0 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_addc_u32 s47, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s46 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s68 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s69 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s50 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s51 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s47 +; GFX8-NOHSA-NEXT: s_add_u32 s46, s8, 0x90 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_addc_u32 s51, s9, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s50 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s56 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s57 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s58 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s59 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s51 +; GFX8-NOHSA-NEXT: s_addc_u32 s47, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s46 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s52 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s53 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s54 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s55 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s47 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: s_nop 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s44 -; GFX8-NOHSA-NEXT: s_add_u32 s44, s8, 0xc0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s45 -; GFX8-NOHSA-NEXT: s_addc_u32 s45, s9, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s44 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s38 +; GFX8-NOHSA-NEXT: s_add_u32 s38, s8, 0x80 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s39 +; GFX8-NOHSA-NEXT: s_addc_u32 s39, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s38 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s56 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s57 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s39 +; GFX8-NOHSA-NEXT: s_add_u32 s38, s8, 0x50 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], 
v[0:3] +; GFX8-NOHSA-NEXT: s_addc_u32 s39, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s38 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s58 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s59 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s60 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s61 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s45 -; GFX8-NOHSA-NEXT: s_add_u32 s44, s8, 0xb0 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_addc_u32 s45, s9, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s44 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s62 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s63 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s5 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s64 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s45 -; GFX8-NOHSA-NEXT: s_add_u32 s44, s8, 0xa0 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_addc_u32 s45, s9, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s44 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s46 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s47 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s48 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s49 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s45 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s39 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: s_nop 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s30 +; GFX8-NOHSA-NEXT: s_add_u32 s30, s8, 64 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s31 +; GFX8-NOHSA-NEXT: s_addc_u32 s31, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s30 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s40 -; GFX8-NOHSA-NEXT: s_add_u32 s40, s8, 0x90 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s41 -; GFX8-NOHSA-NEXT: s_addc_u32 s41, s9, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s40 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s42 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s43 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s41 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s31 +; GFX8-NOHSA-NEXT: s_add_u32 s30, s8, 16 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: s_nop 0 +; GFX8-NOHSA-NEXT: s_addc_u32 s31, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s30 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s36 -; GFX8-NOHSA-NEXT: s_add_u32 s36, s8, 0x80 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s37 -; GFX8-NOHSA-NEXT: s_addc_u32 s37, s9, 0 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s36 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s38 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s39 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s37 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s34 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s35 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s31 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s8 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s26 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s27 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s28 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s29 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s9 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_nop 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s6 +; GFX8-NOHSA-NEXT: s_add_u32 s6, s8, 0xf0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s7 +; GFX8-NOHSA-NEXT: s_addc_u32 s7, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s65 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s64 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7 +; GFX8-NOHSA-NEXT: s_add_u32 s6, s8, 0xe0 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_addc_u32 s7, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s22 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s23 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s24 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s25 +; GFX8-NOHSA-NEXT: 
v_mov_b32_e32 v5, s7 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_nop 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NOHSA-NEXT: s_add_u32 s4, s8, 0xb0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NOHSA-NEXT: s_addc_u32 s5, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s63 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s45 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5 +; GFX8-NOHSA-NEXT: s_add_u32 s4, s8, 0xa0 +; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GFX8-NOHSA-NEXT: s_addc_u32 s5, s9, 0 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s18 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s19 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s21 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: s_nop 0 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s2 @@ -8723,33 +8761,15 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2 ; GFX8-NOHSA-NEXT: s_add_u32 s2, s8, 0x60 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s65 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s33 -; GFX8-NOHSA-NEXT: s_addc_u32 s3, s9, 0 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2 -; GFX8-NOHSA-NEXT: s_add_u32 s2, s8, 0x50 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s30 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s31 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s34 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s35 -; GFX8-NOHSA-NEXT: s_addc_u32 s3, s9, 0 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2 -; GFX8-NOHSA-NEXT: s_add_u32 s2, s8, 64 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s28 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s29 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s26 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s27 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s44 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s43 ; GFX8-NOHSA-NEXT: s_addc_u32 s3, s9, 0 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s22 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s23 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s24 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s25 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s14 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s15 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s16 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s17 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: s_nop 0 @@ -8760,32 +8780,16 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0 ; GFX8-NOHSA-NEXT: s_add_u32 s0, s8, 32 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s6 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s4 -; GFX8-NOHSA-NEXT: s_addc_u32 s1, s9, 0 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NOHSA-NEXT: s_add_u32 s0, s8, 16 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s18 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s19 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s20 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s21 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s42 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s33 ; GFX8-NOHSA-NEXT: s_addc_u32 s1, s9, 0 ; GFX8-NOHSA-NEXT: 
flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s16 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s17 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s14 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s15 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s8 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s10 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s11 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s12 ; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s13 -; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s9 +; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0 ; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NOHSA-NEXT: s_endpgm ; @@ -8984,122 +8988,120 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: s_load_b256 s[0:7], s[10:11], 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_lshr_b32 s40, s7, 16 -; GFX12-NEXT: s_lshr_b32 s50, s6, 8 -; GFX12-NEXT: s_lshr_b32 s62, s3, 16 -; GFX12-NEXT: s_ashr_i32 s51, s3, 24 -; GFX12-NEXT: s_lshr_b32 s42, s7, 8 -; GFX12-NEXT: s_mov_b32 s44, s7 -; GFX12-NEXT: s_lshr_b32 s46, s6, 16 -; GFX12-NEXT: s_lshr_b32 s48, s6, 24 -; GFX12-NEXT: s_lshr_b32 s38, s5, 16 -; GFX12-NEXT: s_lshr_b32 s52, s5, 8 -; GFX12-NEXT: s_mov_b32 s54, s5 -; GFX12-NEXT: s_lshr_b32 s56, s4, 16 -; GFX12-NEXT: s_lshr_b32 s58, s4, 24 -; GFX12-NEXT: s_lshr_b32 s60, s4, 8 -; GFX12-NEXT: s_lshr_b32 s36, s3, 8 -; GFX12-NEXT: s_mov_b32 s34, s3 -; GFX12-NEXT: s_lshr_b32 s28, s2, 16 -; GFX12-NEXT: s_lshr_b32 s26, s2, 24 -; GFX12-NEXT: s_lshr_b32 s24, s2, 8 -; GFX12-NEXT: s_bfe_i64 s[20:21], s[2:3], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[30:31], s[4:5], 0x80000 +; GFX12-NEXT: s_lshr_b32 s34, s6, 16 +; GFX12-NEXT: s_lshr_b32 s36, s6, 24 +; GFX12-NEXT: s_lshr_b32 s38, s6, 8 +; GFX12-NEXT: s_lshr_b32 s40, s4, 16 +; GFX12-NEXT: s_lshr_b32 s42, s4, 24 +; GFX12-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 +; GFX12-NEXT: s_lshr_b32 s44, s4, 8 ; GFX12-NEXT: s_bfe_i64 s[66:67], s[6:7], 0x80000 -; GFX12-NEXT: s_ashr_i32 s39, s3, 31 -; GFX12-NEXT: s_bfe_i64 s[2:3], s[62:63], 0x80000 -; GFX12-NEXT: s_ashr_i32 s62, s5, 31 -; GFX12-NEXT: s_ashr_i32 s63, s5, 24 -; GFX12-NEXT: s_bfe_i64 s[4:5], s[50:51], 0x80000 -; GFX12-NEXT: s_ashr_i32 s50, s7, 31 +; GFX12-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s35 +; GFX12-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000 ; GFX12-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000 -; GFX12-NEXT: s_ashr_i32 s7, s7, 24 +; GFX12-NEXT: v_dual_mov_b32 v0, s34 :: v_dual_mov_b32 v3, s37 +; GFX12-NEXT: v_dual_mov_b32 v2, s36 :: v_dual_mov_b32 v5, s67 +; GFX12-NEXT: s_lshr_b32 s28, s2, 16 +; GFX12-NEXT: s_lshr_b32 s46, s2, 24 +; GFX12-NEXT: s_bfe_i64 s[64:65], s[4:5], 0x80000 ; GFX12-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s41 -; GFX12-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v4, s66 :: v_dual_mov_b32 v7, s39 +; GFX12-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v9, s41 +; GFX12-NEXT: s_lshr_b32 s48, s2, 8 +; GFX12-NEXT: v_dual_mov_b32 v8, s40 :: v_dual_mov_b32 v11, s43 +; GFX12-NEXT: v_dual_mov_b32 v10, s42 :: v_dual_mov_b32 v13, s65 +; GFX12-NEXT: s_lshr_b32 s50, s0, 16 +; GFX12-NEXT: s_lshr_b32 s52, s0, 24 ; GFX12-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v0, 
s40 :: v_dual_mov_b32 v3, s50 -; GFX12-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v5, s45 -; GFX12-NEXT: v_dual_mov_b32 v4, s44 :: v_dual_mov_b32 v7, s43 -; GFX12-NEXT: v_dual_mov_b32 v6, s42 :: v_dual_mov_b32 v9, s47 -; GFX12-NEXT: v_dual_mov_b32 v8, s46 :: v_dual_mov_b32 v11, s49 -; GFX12-NEXT: v_dual_mov_b32 v10, s48 :: v_dual_mov_b32 v13, s67 -; GFX12-NEXT: v_dual_mov_b32 v12, s66 :: v_dual_mov_b32 v15, s5 -; GFX12-NEXT: v_mov_b32_e32 v14, s4 -; GFX12-NEXT: s_bfe_i64 s[4:5], s[38:39], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v12, s64 :: v_dual_mov_b32 v15, s45 +; GFX12-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 +; GFX12-NEXT: v_mov_b32_e32 v14, s44 +; GFX12-NEXT: s_lshr_b32 s54, s0, 8 +; GFX12-NEXT: s_bfe_i64 s[30:31], s[2:3], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000 +; GFX12-NEXT: s_lshr_b32 s56, s7, 16 +; GFX12-NEXT: s_lshr_b32 s58, s5, 16 +; GFX12-NEXT: s_lshr_b32 s60, s1, 8 +; GFX12-NEXT: s_mov_b32 s62, s1 +; GFX12-NEXT: s_ashr_i32 s57, s1, 24 +; GFX12-NEXT: s_ashr_i32 s59, s3, 31 +; GFX12-NEXT: s_ashr_i32 s61, s3, 24 +; GFX12-NEXT: s_ashr_i32 s63, s5, 31 ; GFX12-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[58:59], s[58:59], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000 ; GFX12-NEXT: s_clause 0x3 -; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:240 -; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:224 -; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:208 -; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:192 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s62 -; GFX12-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s63 -; GFX12-NEXT: v_mov_b32_e32 v5, s55 -; GFX12-NEXT: s_bfe_i64 s[60:61], s[60:61], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v4, s54 :: v_dual_mov_b32 v7, s53 -; GFX12-NEXT: v_dual_mov_b32 v6, s52 :: v_dual_mov_b32 v9, s57 -; GFX12-NEXT: v_dual_mov_b32 v8, s56 :: v_dual_mov_b32 v11, s59 -; GFX12-NEXT: v_dual_mov_b32 v10, s58 :: v_dual_mov_b32 v13, s31 -; GFX12-NEXT: s_lshr_b32 s22, s1, 16 -; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v12, s30 :: v_dual_mov_b32 v15, s61 -; GFX12-NEXT: v_dual_mov_b32 v14, s60 :: v_dual_mov_b32 v17, s3 -; GFX12-NEXT: s_lshr_b32 s16, s1, 8 -; GFX12-NEXT: s_mov_b32 s18, s1 -; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v19, s39 -; GFX12-NEXT: v_dual_mov_b32 v18, s51 :: v_dual_mov_b32 v21, s35 -; GFX12-NEXT: s_lshr_b32 s14, s0, 16 -; GFX12-NEXT: s_lshr_b32 s12, s0, 24 -; GFX12-NEXT: s_ashr_i32 s6, s1, 31 -; GFX12-NEXT: s_ashr_i32 s33, s1, 24 -; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v20, s34 :: v_dual_mov_b32 v23, s37 -; GFX12-NEXT: v_mov_b32_e32 v22, s36 -; GFX12-NEXT: s_clause 0x5 -; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:176 -; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:160 +; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:208 +; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:192 ; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:144 ; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:128 -; GFX12-NEXT: global_store_b128 v24, 
v[16:19], s[8:9] offset:112 -; GFX12-NEXT: global_store_b128 v24, v[20:23], s[8:9] offset:96 -; GFX12-NEXT: v_dual_mov_b32 v0, s28 :: v_dual_mov_b32 v3, s27 -; GFX12-NEXT: v_dual_mov_b32 v1, s29 :: v_dual_mov_b32 v2, s26 -; GFX12-NEXT: v_mov_b32_e32 v5, s21 -; GFX12-NEXT: s_lshr_b32 s64, s0, 8 +; GFX12-NEXT: v_dual_mov_b32 v0, s28 :: v_dual_mov_b32 v3, s47 +; GFX12-NEXT: v_dual_mov_b32 v1, s29 :: v_dual_mov_b32 v2, s46 +; GFX12-NEXT: v_mov_b32_e32 v5, s31 +; GFX12-NEXT: s_lshr_b32 s26, s7, 8 +; GFX12-NEXT: s_mov_b32 s24, s7 +; GFX12-NEXT: s_bfe_i64 s[22:23], s[0:1], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v4, s30 :: v_dual_mov_b32 v7, s49 +; GFX12-NEXT: v_dual_mov_b32 v6, s48 :: v_dual_mov_b32 v9, s51 +; GFX12-NEXT: s_lshr_b32 s18, s5, 8 +; GFX12-NEXT: s_mov_b32 s20, s5 +; GFX12-NEXT: s_lshr_b32 s16, s3, 16 +; GFX12-NEXT: s_lshr_b32 s12, s3, 8 +; GFX12-NEXT: s_mov_b32 s14, s3 +; GFX12-NEXT: s_lshr_b32 s10, s1, 16 +; GFX12-NEXT: s_ashr_i32 s33, s1, 31 +; GFX12-NEXT: s_bfe_i64 s[2:3], s[62:63], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[0:1], s[60:61], 0x80000 +; GFX12-NEXT: s_ashr_i32 s60, s5, 24 +; GFX12-NEXT: s_bfe_i64 s[4:5], s[58:59], 0x80000 +; GFX12-NEXT: s_ashr_i32 s58, s7, 31 +; GFX12-NEXT: s_ashr_i32 s62, s7, 24 +; GFX12-NEXT: s_bfe_i64 s[6:7], s[56:57], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v8, s50 :: v_dual_mov_b32 v11, s53 +; GFX12-NEXT: v_dual_mov_b32 v10, s52 :: v_dual_mov_b32 v13, s23 +; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v15, s55 +; GFX12-NEXT: v_dual_mov_b32 v14, s54 :: v_dual_mov_b32 v17, s7 +; GFX12-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 ; GFX12-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v16, s6 :: v_dual_mov_b32 v19, s58 +; GFX12-NEXT: v_dual_mov_b32 v18, s62 :: v_dual_mov_b32 v21, s25 ; GFX12-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v4, s20 :: v_dual_mov_b32 v7, s25 -; GFX12-NEXT: v_dual_mov_b32 v6, s24 :: v_dual_mov_b32 v9, s23 -; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v11, s6 -; GFX12-NEXT: v_dual_mov_b32 v10, s33 :: v_dual_mov_b32 v13, s19 -; GFX12-NEXT: s_bfe_i64 s[10:11], s[0:1], 0x80000 -; GFX12-NEXT: s_bfe_i64 s[0:1], s[64:65], 0x80000 -; GFX12-NEXT: v_dual_mov_b32 v12, s18 :: v_dual_mov_b32 v15, s17 -; GFX12-NEXT: v_dual_mov_b32 v14, s16 :: v_dual_mov_b32 v17, s15 -; GFX12-NEXT: v_dual_mov_b32 v16, s14 :: v_dual_mov_b32 v19, s13 -; GFX12-NEXT: v_dual_mov_b32 v18, s12 :: v_dual_mov_b32 v21, s11 -; GFX12-NEXT: v_dual_mov_b32 v20, s10 :: v_dual_mov_b32 v23, s1 -; GFX12-NEXT: v_mov_b32_e32 v22, s0 +; GFX12-NEXT: v_dual_mov_b32 v20, s24 :: v_dual_mov_b32 v23, s27 +; GFX12-NEXT: v_mov_b32_e32 v22, s26 ; GFX12-NEXT: s_clause 0x5 ; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:80 ; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:64 -; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:48 -; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:32 -; GFX12-NEXT: global_store_b128 v24, v[16:19], s[8:9] offset:16 -; GFX12-NEXT: global_store_b128 v24, v[20:23], s[8:9] +; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:16 +; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] +; GFX12-NEXT: global_store_b128 v24, v[16:19], s[8:9] offset:240 +; 
GFX12-NEXT: global_store_b128 v24, v[20:23], s[8:9] offset:224 +; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s63 +; GFX12-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s60 +; GFX12-NEXT: v_mov_b32_e32 v5, s21 +; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 +; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v4, s20 :: v_dual_mov_b32 v7, s19 +; GFX12-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s17 +; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 +; GFX12-NEXT: v_dual_mov_b32 v8, s16 :: v_dual_mov_b32 v11, s59 +; GFX12-NEXT: v_dual_mov_b32 v10, s61 :: v_dual_mov_b32 v13, s15 +; GFX12-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v15, s13 +; GFX12-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v17, s11 +; GFX12-NEXT: v_dual_mov_b32 v16, s10 :: v_dual_mov_b32 v19, s33 +; GFX12-NEXT: v_dual_mov_b32 v18, s57 :: v_dual_mov_b32 v21, s3 +; GFX12-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v23, s1 +; GFX12-NEXT: v_mov_b32_e32 v22, s0 +; GFX12-NEXT: s_clause 0x5 +; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:176 +; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:160 +; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:112 +; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:96 +; GFX12-NEXT: global_store_b128 v24, v[16:19], s[8:9] offset:48 +; GFX12-NEXT: global_store_b128 v24, v[20:23], s[8:9] offset:32 ; GFX12-NEXT: s_endpgm %load = load <32 x i8>, ptr addrspace(4) %in %ext = sext <32 x i8> %load to <32 x i64> @@ -9832,24 +9834,50 @@ define amdgpu_kernel void @constant_zextload_v4i8_to_v4i16(ptr addrspace(1) %out ; ; EG-LABEL: constant_zextload_v4i8_to_v4i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 6, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T4.XY, T5.X, 1 +; EG-NEXT: ALU 31, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XY, T7.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_32 T4.X, T4.X, 0, #1 +; EG-NEXT: VTX_READ_32 T7.X, T7.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T4.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: MOV * T7.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: AND_INT T0.W, T7.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 255(3.573311e-43), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T0.W, T7.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, ; EG-NEXT: MOV * T0.W, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT * T4.Y, T4.X, literal.x, PV.W, +; EG-NEXT: BFE_UINT T0.W, T7.X, literal.x, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T7.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: AND_INT T4.X, T4.X, literal.x, -; EG-NEXT: LSHR * T5.X, KC0[2].Y, literal.y, -; EG-NEXT: 255(3.573311e-43), 2(2.802597e-45) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, 
+; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: LSHR T7.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T8.Y, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.Y, +; EG-NEXT: MOV * T8.X, T4.X, ; ; GFX12-LABEL: constant_zextload_v4i8_to_v4i16: ; GFX12: ; %bb.0: @@ -9951,23 +9979,56 @@ define amdgpu_kernel void @constant_sextload_v4i8_to_v4i16(ptr addrspace(1) %out ; ; EG-LABEL: constant_sextload_v4i8_to_v4i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 5, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T5.XY, T4.X, 1 +; EG-NEXT: ALU 37, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XY, T7.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_32 T4.X, T4.X, 0, #1 +; EG-NEXT: VTX_READ_32 T7.X, T7.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T4.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: -; EG-NEXT: BFE_INT T5.X, T4.X, 0.0, literal.x, -; EG-NEXT: LSHR T0.W, T4.X, literal.x, -; EG-NEXT: LSHR * T4.X, KC0[2].Y, literal.y, -; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45) -; EG-NEXT: BFE_INT * T5.Y, PV.W, 0.0, literal.x, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: MOV * T7.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: BFE_INT * T0.W, T7.X, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T7.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: LSHR * T0.W, T7.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T7.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: LSHR T7.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T8.Y, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.Y, +; EG-NEXT: MOV * T8.X, T4.X, ; ; GFX12-LABEL: constant_sextload_v4i8_to_v4i16: ; GFX12: ; %bb.0: @@ -10088,27 +10149,80 @@ define amdgpu_kernel void @constant_zextload_v8i8_to_v8i16(ptr addrspace(1) %out ; ; EG-LABEL: constant_zextload_v8i8_to_v8i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 9, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XYZW, T5.X, 1 +; EG-NEXT: ALU 61, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T12.XYZW, T11.X, 1 ; 
EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_64 T5.XY, T5.X, 0, #1 +; EG-NEXT: VTX_READ_64 T11.XY, T11.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T5.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: MOV * T11.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: AND_INT T0.W, T11.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 255(3.573311e-43), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T0.W, T11.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, ; EG-NEXT: MOV * T0.W, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT * T6.W, T5.Y, literal.x, PV.W, +; EG-NEXT: BFE_UINT T1.W, T11.X, literal.x, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), -65536(nan) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T11.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T12.Y, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T11.Y, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T11.Y, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT T6.Y, T5.X, literal.x, T0.W, -; EG-NEXT: AND_INT * T6.Z, T5.Y, literal.y, -; EG-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; EG-NEXT: AND_INT T6.X, T5.X, literal.x, -; EG-NEXT: LSHR * T5.X, KC0[2].Y, literal.y, -; EG-NEXT: 255(3.573311e-43), 2(2.802597e-45) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: BFE_UINT * T0.W, T11.Y, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PV.W, T0.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T11.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: LSHR T11.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T12.W, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T12.X, T8.X, +; EG-NEXT: MOV * T12.Z, T4.X, ; ; GFX12-LABEL: constant_zextload_v8i8_to_v8i16: ; GFX12: ; %bb.0: @@ -10255,28 +10369,93 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i16(ptr addrspace(1) %out ; ; EG-LABEL: constant_sextload_v8i8_to_v8i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 10, @9, 
KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XYZW, T5.X, 1 +; EG-NEXT: ALU 74, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T12.XYZW, T11.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_64 T5.XY, T5.X, 0, #1 +; EG-NEXT: VTX_READ_64 T11.XY, T11.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T5.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: -; EG-NEXT: BFE_INT * T6.Z, T5.Y, 0.0, literal.x, +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: MOV * T11.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: BFE_INT * T0.W, T11.X, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T6.X, T5.X, 0.0, literal.x, -; EG-NEXT: LSHR * T0.W, T5.Y, literal.x, +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T11.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T6.W, PV.W, 0.0, literal.x, -; EG-NEXT: LSHR * T0.W, T5.X, literal.x, +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: LSHR * T0.W, T11.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T11.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T12.Y, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: BFE_INT * T0.W, T11.Y, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T5.X, KC0[2].Y, literal.x, -; EG-NEXT: BFE_INT * T6.Y, PS, 0.0, literal.y, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T11.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: LSHR * T0.W, T11.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, 
PV.X, +; EG-NEXT: ASHR * T0.W, T11.Y, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: LSHR T11.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T12.W, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T12.X, T8.X, +; EG-NEXT: MOV * T12.Z, T4.X, ; ; GFX12-LABEL: constant_sextload_v8i8_to_v8i16: ; GFX12: ; %bb.0: @@ -10472,37 +10651,146 @@ define amdgpu_kernel void @constant_zextload_v16i8_to_v16i16(ptr addrspace(1) %o ; ; EG-LABEL: constant_zextload_v16i8_to_v16i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T9.XYZW, T10.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XYZW, T7.X, 1 +; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 0 @8 +; EG-NEXT: ALU 103, @12, KC0[], KC1[] +; EG-NEXT: ALU 20, @116, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T20.XYZW, T22.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T19.XYZW, T21.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_128 T7.XYZW, T7.X, 0, #1 -; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T7.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: +; EG-NEXT: PAD +; EG-NEXT: Fetch clause starting at 8: +; EG-NEXT: VTX_READ_128 T19.XYZW, T19.X, 0, #1 +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: MOV * T0.Y, T16.X, +; EG-NEXT: MOV * T19.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: AND_INT T0.W, T19.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 255(3.573311e-43), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T16.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T0.W, T19.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV T0.Y, T17.X, ; EG-NEXT: MOV * T0.W, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT * T8.W, T7.Y, literal.x, PV.W, +; EG-NEXT: BFE_UINT T1.W, T19.X, literal.x, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), -65536(nan) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T19.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT T8.Y, T7.X, literal.x, T0.W, -; EG-NEXT: AND_INT T8.Z, T7.Y, literal.y, -; EG-NEXT: BFE_UINT * T9.W, T7.W, literal.x, T0.W, -; EG-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; EG-NEXT: AND_INT T8.X, T7.X, literal.x, -; EG-NEXT: BFE_UINT T9.Y, T7.Z, literal.y, T0.W, -; EG-NEXT: LSHR * T7.X, KC0[2].Y, literal.z, -; EG-NEXT: 255(3.573311e-43), 8(1.121039e-44) -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) -; EG-NEXT: AND_INT * T9.Z, T7.W, literal.x, -; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00) -; EG-NEXT: AND_INT T9.X, T7.Z, literal.x, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, -; EG-NEXT: 255(3.573311e-43), 16(2.242078e-44) -; EG-NEXT: LSHR * T10.X, PV.W, literal.x, +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T20.Y, PV.W, PS, +; EG-NEXT: MOV T17.X, 
PV.Y, +; EG-NEXT: MOV * T0.Y, T12.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T19.Y, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T12.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T19.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV T0.Y, T13.X, +; EG-NEXT: BFE_UINT * T1.W, T19.Y, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T19.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T20.W, PV.W, PS, +; EG-NEXT: MOV T13.X, PV.W, +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T19.Z, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T19.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: BFE_UINT * T1.W, T19.Z, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T19.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T19.Y, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T19.W, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T19.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: BFE_UINT * T0.W, T19.W, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: ALU clause starting at 116: +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PV.W, T0.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR T0.W, T19.W, literal.x, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 16(2.242078e-44) +; EG-NEXT: LSHR T21.X, PS, literal.x, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.y, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.z, +; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; 
EG-NEXT: 16711680(2.341805e-38), 0(0.000000e+00) +; EG-NEXT: LSHR T22.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T19.W, PV.W, PS, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T20.X, T16.X, +; EG-NEXT: MOV * T20.Z, T12.X, +; EG-NEXT: MOV T19.X, T8.X, +; EG-NEXT: MOV * T19.Z, T4.X, BS:VEC_120/SCL_212 ; ; GFX12-LABEL: constant_zextload_v16i8_to_v16i16: ; GFX12: ; %bb.0: @@ -10753,38 +11041,173 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i16(ptr addrspace(1) %o ; ; EG-LABEL: constant_sextload_v16i8_to_v16i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 20, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T9.XYZW, T10.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XYZW, T7.X, 1 +; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 0 @8 +; EG-NEXT: ALU 104, @12, KC0[], KC1[] +; EG-NEXT: ALU 46, @117, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T20.XYZW, T22.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T19.XYZW, T21.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_128 T7.XYZW, T7.X, 0, #1 -; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T7.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: -; EG-NEXT: BFE_INT * T8.Z, T7.Y, 0.0, literal.x, +; EG-NEXT: PAD +; EG-NEXT: Fetch clause starting at 8: +; EG-NEXT: VTX_READ_128 T19.XYZW, T19.X, 0, #1 +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: MOV * T0.Y, T16.X, +; EG-NEXT: MOV * T19.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: BFE_INT * T0.W, T19.X, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T16.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T19.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T8.X, T7.X, 0.0, literal.x, -; EG-NEXT: BFE_INT T9.Z, T7.W, 0.0, literal.x, -; EG-NEXT: LSHR * T0.W, T7.Y, literal.x, +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV T0.Y, T17.X, +; EG-NEXT: LSHR * T0.W, T19.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T19.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T20.Y, PV.W, PS, +; EG-NEXT: MOV T17.X, PV.Y, +; EG-NEXT: MOV T0.Y, T12.X, +; EG-NEXT: BFE_INT * T0.W, T19.Y, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T9.X, T7.Z, 0.0, literal.x, -; EG-NEXT: LSHR T0.Z, T7.W, literal.x, -; EG-NEXT: BFE_INT T8.W, PV.W, 0.0, literal.x, -; EG-NEXT: LSHR * T0.W, T7.X, literal.x, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: 
AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T12.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T19.Y, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T7.X, KC0[2].Y, literal.x, -; EG-NEXT: BFE_INT T8.Y, PS, 0.0, literal.y, -; EG-NEXT: LSHR T1.Z, T7.Z, literal.y, -; EG-NEXT: BFE_INT T9.W, PV.Z, 0.0, literal.y, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T10.X, PS, literal.x, -; EG-NEXT: BFE_INT * T9.Y, PV.Z, 0.0, literal.y, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV T0.Y, T13.X, +; EG-NEXT: LSHR * T0.W, T19.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T19.Y, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T20.W, PV.W, PS, +; EG-NEXT: MOV T13.X, PV.W, +; EG-NEXT: MOV T0.Y, T8.X, +; EG-NEXT: BFE_INT * T0.W, T19.Z, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T19.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: LSHR * T0.W, T19.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T19.Z, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: ALU clause starting at 117: +; EG-NEXT: OR_INT * T19.Y, T1.W, T0.W, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: BFE_INT * T0.W, T19.W, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV 
T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T19.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: LSHR * T0.W, T19.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR T0.W, T19.W, literal.x, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.y, +; EG-NEXT: 24(3.363116e-44), 16(2.242078e-44) +; EG-NEXT: LSHR T21.X, PS, literal.x, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.y, +; EG-NEXT: LSHL * T0.W, PV.W, literal.z, +; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: LSHR T22.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T19.W, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T20.X, T16.X, +; EG-NEXT: MOV * T20.Z, T12.X, +; EG-NEXT: MOV T19.X, T8.X, +; EG-NEXT: MOV * T19.Z, T4.X, BS:VEC_120/SCL_212 ; ; GFX12-LABEL: constant_sextload_v16i8_to_v16i16: ; GFX12: ; %bb.0: @@ -11132,58 +11555,276 @@ define amdgpu_kernel void @constant_zextload_v32i8_to_v32i16(ptr addrspace(1) %o ; ; EG-LABEL: constant_zextload_v32i8_to_v32i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 1 @8 -; EG-NEXT: ALU 37, @13, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T17.XYZW, T18.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T15.XYZW, T12.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T14.XYZW, T16.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T13.XYZW, T11.X, 1 +; EG-NEXT: ALU 1, @14, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 1 @10 +; EG-NEXT: ALU 103, @16, KC0[], KC1[] +; EG-NEXT: ALU 104, @120, KC0[], KC1[] +; EG-NEXT: ALU 41, @225, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T36.XYZW, T42.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T37.XYZW, T41.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T38.XYZW, T40.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T35.XYZW, T39.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 8: -; EG-NEXT: VTX_READ_128 T12.XYZW, T11.X, 16, #1 -; EG-NEXT: VTX_READ_128 T11.XYZW, T11.X, 0, #1 -; EG-NEXT: ALU clause starting at 12: -; EG-NEXT: MOV * T11.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 13: +; EG-NEXT: Fetch clause starting at 10: +; EG-NEXT: VTX_READ_128 T37.XYZW, T35.X, 16, #1 +; EG-NEXT: VTX_READ_128 T35.XYZW, T35.X, 0, #1 +; EG-NEXT: ALU clause starting at 14: +; EG-NEXT: MOV * T0.Y, T16.X, +; EG-NEXT: MOV * T35.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 16: +; EG-NEXT: AND_INT T0.W, T37.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 255(3.573311e-43), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T16.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T0.W, T37.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T0.W, 
PV.W, PS, +; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV T0.Y, T17.X, ; EG-NEXT: MOV * T0.W, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT * T13.W, T11.Y, literal.x, PV.W, +; EG-NEXT: BFE_UINT T1.W, T37.X, literal.x, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), -65536(nan) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T37.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT T13.Y, T11.X, literal.x, T0.W, -; EG-NEXT: AND_INT T13.Z, T11.Y, literal.y, -; EG-NEXT: BFE_UINT * T14.W, T11.W, literal.x, T0.W, -; EG-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; EG-NEXT: AND_INT T13.X, T11.X, literal.x, -; EG-NEXT: BFE_UINT T14.Y, T11.Z, literal.y, T0.W, -; EG-NEXT: LSHR * T11.X, KC0[2].Y, literal.z, -; EG-NEXT: 255(3.573311e-43), 8(1.121039e-44) -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) -; EG-NEXT: AND_INT T14.Z, T11.W, literal.x, -; EG-NEXT: BFE_UINT * T15.W, T12.Y, literal.y, T0.W, -; EG-NEXT: 255(3.573311e-43), 8(1.121039e-44) -; EG-NEXT: AND_INT T14.X, T11.Z, literal.x, -; EG-NEXT: BFE_UINT T15.Y, T12.X, literal.y, T0.W, -; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.z, -; EG-NEXT: 255(3.573311e-43), 8(1.121039e-44) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T36.Y, PV.W, PS, +; EG-NEXT: MOV T17.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T12.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T37.Y, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T12.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T37.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV T0.Y, T13.X, +; EG-NEXT: BFE_UINT * T1.W, T37.Y, literal.x, T0.W, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T16.X, PV.W, literal.x, -; EG-NEXT: AND_INT T15.Z, T12.Y, literal.y, -; EG-NEXT: BFE_UINT T17.W, T12.W, literal.z, T0.W, -; EG-NEXT: AND_INT * T15.X, T12.X, literal.y, -; EG-NEXT: 2(2.802597e-45), 255(3.573311e-43) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T37.Y, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT T17.Y, T12.Z, literal.x, T0.W, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, -; EG-NEXT: 8(1.121039e-44), 32(4.484155e-44) -; EG-NEXT: LSHR T12.X, PV.W, literal.x, -; EG-NEXT: AND_INT T17.Z, T12.W, literal.y, -; EG-NEXT: AND_INT * T17.X, T12.Z, literal.y, -; EG-NEXT: 2(2.802597e-45), 255(3.573311e-43) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T36.W, PV.W, PS, +; EG-NEXT: MOV T13.X, PV.W, +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T37.Z, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T37.Z, literal.x, +; EG-NEXT: 
8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: BFE_UINT * T1.W, T37.Z, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T37.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T37.Y, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T37.W, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T37.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: BFE_UINT * T1.W, T37.W, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: ALU clause starting at 120: +; EG-NEXT: AND_INT * T2.W, T0.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T37.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T37.W, PV.W, PS, +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T0.Y, T32.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T35.X, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T32.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T35.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T32.X, PV.W, +; EG-NEXT: MOV T0.Y, T33.X, +; EG-NEXT: BFE_UINT * T1.W, T35.X, literal.x, T0.W, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T33.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T35.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T38.Y, PV.W, PS, +; EG-NEXT: MOV T33.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T28.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T35.Y, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T28.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T35.Y, literal.x, +; EG-NEXT: 
8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T28.X, PV.W, +; EG-NEXT: MOV T0.Y, T29.X, +; EG-NEXT: BFE_UINT * T1.W, T35.Y, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T29.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T35.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T38.W, PV.W, PS, +; EG-NEXT: MOV T29.X, PV.W, +; EG-NEXT: MOV * T0.Y, T24.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T35.Z, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T24.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T35.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T24.X, PV.W, +; EG-NEXT: MOV T0.Y, T25.X, +; EG-NEXT: BFE_UINT * T1.W, T35.Z, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T25.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T35.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T35.Y, PV.W, PS, +; EG-NEXT: MOV T25.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T20.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T35.W, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T20.X, PV.W, +; EG-NEXT: ALU clause starting at 225: +; EG-NEXT: MOV T0.Y, T20.X, +; EG-NEXT: LSHL * T1.W, T35.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T20.X, PV.W, +; EG-NEXT: MOV T0.Y, T21.X, +; EG-NEXT: BFE_UINT * T0.W, T35.W, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PV.W, T0.W, +; EG-NEXT: MOV * T21.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, -; EG-NEXT: 48(6.726233e-44), 0(0.000000e+00) -; EG-NEXT: LSHR * T18.X, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: LSHR T39.X, PV.W, literal.x, +; EG-NEXT: LSHR * T40.X, KC0[2].Y, literal.x, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: LSHR T0.W, T35.W, literal.x, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 48(6.726233e-44) +; EG-NEXT: LSHR T41.X, PS, literal.x, +; EG-NEXT: AND_INT T0.Z, T0.Y, literal.y, +; EG-NEXT: AND_INT T0.W, PV.W, literal.z, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, 
literal.w, +; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; EG-NEXT: 16711680(2.341805e-38), 32(4.484155e-44) +; EG-NEXT: LSHR T42.X, PS, literal.x, +; EG-NEXT: OR_INT * T35.W, PV.Z, PV.W, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T21.X, PV.W, +; EG-NEXT: MOV * T36.X, T16.X, +; EG-NEXT: MOV * T36.Z, T12.X, +; EG-NEXT: MOV T37.X, T8.X, +; EG-NEXT: MOV T37.Z, T4.X, BS:VEC_120/SCL_212 +; EG-NEXT: MOV * T38.X, T32.X, +; EG-NEXT: MOV * T38.Z, T28.X, +; EG-NEXT: MOV T35.X, T24.X, +; EG-NEXT: MOV * T35.Z, T20.X, BS:VEC_120/SCL_212 ; ; GFX12-LABEL: constant_zextload_v32i8_to_v32i16: ; GFX12: ; %bb.0: @@ -11642,60 +12283,331 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i16(ptr addrspace(1) %o ; ; EG-LABEL: constant_sextload_v32i8_to_v32i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 1 @8 -; EG-NEXT: ALU 39, @13, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T18.XYZW, T12.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T17.XYZW, T11.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T16.XYZW, T14.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T15.XYZW, T13.X, 1 +; EG-NEXT: ALU 1, @14, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 1 @10 +; EG-NEXT: ALU 104, @16, KC0[], KC1[] +; EG-NEXT: ALU 104, @121, KC0[], KC1[] +; EG-NEXT: ALU 95, @226, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T36.XYZW, T42.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T37.XYZW, T41.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T38.XYZW, T40.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T35.XYZW, T39.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 8: -; EG-NEXT: VTX_READ_128 T12.XYZW, T11.X, 16, #1 -; EG-NEXT: VTX_READ_128 T11.XYZW, T11.X, 0, #1 -; EG-NEXT: ALU clause starting at 12: -; EG-NEXT: MOV * T11.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 13: -; EG-NEXT: LSHR T13.X, KC0[2].Y, literal.x, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, -; EG-NEXT: 2(2.802597e-45), 16(2.242078e-44) -; EG-NEXT: LSHR T14.X, PV.W, literal.x, -; EG-NEXT: BFE_INT * T15.Z, T11.Y, 0.0, literal.y, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) -; EG-NEXT: BFE_INT T15.X, T11.X, 0.0, literal.x, -; EG-NEXT: LSHR T0.Y, T12.W, literal.x, -; EG-NEXT: BFE_INT T16.Z, T11.W, 0.0, literal.x, BS:VEC_120/SCL_212 -; EG-NEXT: LSHR T0.W, T12.Y, literal.x, -; EG-NEXT: LSHR * T1.W, T11.Y, literal.x, +; EG-NEXT: Fetch clause starting at 10: +; EG-NEXT: VTX_READ_128 T37.XYZW, T35.X, 16, #1 +; EG-NEXT: VTX_READ_128 T35.XYZW, T35.X, 0, #1 +; EG-NEXT: ALU clause starting at 14: +; EG-NEXT: MOV * T0.Y, T16.X, +; EG-NEXT: MOV * T35.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 16: +; EG-NEXT: BFE_INT * T0.W, T37.X, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T16.X, T11.Z, 0.0, literal.x, -; EG-NEXT: LSHR T1.Y, T11.W, literal.x, -; EG-NEXT: BFE_INT T17.Z, T12.Y, 0.0, literal.x, -; EG-NEXT: BFE_INT T15.W, PS, 0.0, literal.x, -; EG-NEXT: LSHR * T1.W, T11.X, literal.x, +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T16.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T37.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, 
+; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV T0.Y, T17.X, +; EG-NEXT: LSHR * T0.W, T37.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T37.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T36.Y, PV.W, PS, +; EG-NEXT: MOV T17.X, PV.Y, +; EG-NEXT: MOV T0.Y, T12.X, +; EG-NEXT: BFE_INT * T0.W, T37.Y, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T17.X, T12.X, 0.0, literal.x, -; EG-NEXT: BFE_INT T15.Y, PS, 0.0, literal.x, -; EG-NEXT: BFE_INT T18.Z, T12.W, 0.0, literal.x, -; EG-NEXT: BFE_INT T16.W, PV.Y, 0.0, literal.x, -; EG-NEXT: LSHR * T1.W, T11.Z, literal.x, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T12.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T37.Y, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T18.X, T12.Z, 0.0, literal.x, -; EG-NEXT: BFE_INT T16.Y, PS, 0.0, literal.x, -; EG-NEXT: LSHR T0.Z, T12.X, literal.x, -; EG-NEXT: BFE_INT T17.W, T0.W, 0.0, literal.x, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, -; EG-NEXT: 8(1.121039e-44), 32(4.484155e-44) -; EG-NEXT: LSHR T11.X, PS, literal.x, -; EG-NEXT: BFE_INT T17.Y, PV.Z, 0.0, literal.y, -; EG-NEXT: LSHR T0.Z, T12.Z, literal.y, -; EG-NEXT: BFE_INT T18.W, T0.Y, 0.0, literal.y, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) -; EG-NEXT: 48(6.726233e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T12.X, PS, literal.x, -; EG-NEXT: BFE_INT * T18.Y, PV.Z, 0.0, literal.y, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV T0.Y, T13.X, +; EG-NEXT: LSHR * T0.W, T37.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T37.Y, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T36.W, PV.W, PS, +; EG-NEXT: MOV T13.X, PV.W, +; EG-NEXT: MOV T0.Y, T8.X, +; EG-NEXT: BFE_INT * T0.W, T37.Z, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T8.X, PV.W, 
+; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T37.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: LSHR * T0.W, T37.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T37.Z, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: ALU clause starting at 121: +; EG-NEXT: OR_INT * T37.Y, T1.W, T0.W, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: BFE_INT * T0.W, T37.W, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T37.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: LSHR * T0.W, T37.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T37.W, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T37.W, PV.W, PS, +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV T0.Y, T32.X, +; EG-NEXT: BFE_INT * T0.W, T35.X, 0.0, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T32.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T35.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T32.X, PV.W, +; EG-NEXT: MOV T0.Y, T33.X, +; EG-NEXT: LSHR * T0.W, T35.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; 
EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T33.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T35.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T38.Y, PV.W, PS, +; EG-NEXT: MOV T33.X, PV.Y, +; EG-NEXT: MOV T0.Y, T28.X, +; EG-NEXT: BFE_INT * T0.W, T35.Y, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T28.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T35.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T28.X, PV.W, +; EG-NEXT: MOV T0.Y, T29.X, +; EG-NEXT: LSHR * T0.W, T35.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T29.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T35.Y, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: ALU clause starting at 226: +; EG-NEXT: AND_INT T1.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T0.W, T0.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T38.W, PV.W, PS, +; EG-NEXT: MOV T29.X, PV.W, +; EG-NEXT: MOV T0.Y, T24.X, +; EG-NEXT: BFE_INT * T0.W, T35.Z, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T24.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T35.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T24.X, PV.W, +; EG-NEXT: MOV T0.Y, T25.X, +; EG-NEXT: LSHR * T0.W, T35.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T25.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T35.Z, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: 
OR_INT * T35.Y, PV.W, PS, +; EG-NEXT: MOV T25.X, PV.Y, +; EG-NEXT: MOV T0.Y, T20.X, +; EG-NEXT: BFE_INT * T0.W, T35.W, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T20.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T35.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T20.X, PV.W, +; EG-NEXT: MOV T0.Y, T21.X, +; EG-NEXT: LSHR * T0.W, T35.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T21.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: LSHR T39.X, PV.W, literal.x, +; EG-NEXT: LSHR * T40.X, KC0[2].Y, literal.x, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: ASHR T0.W, T35.W, literal.x, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.y, +; EG-NEXT: 24(3.363116e-44), 48(6.726233e-44) +; EG-NEXT: LSHR T41.X, PS, literal.x, +; EG-NEXT: AND_INT T0.Z, T0.Y, literal.y, +; EG-NEXT: LSHL T0.W, PV.W, literal.z, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.w, +; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; EG-NEXT: 16(2.242078e-44), 32(4.484155e-44) +; EG-NEXT: LSHR T42.X, PS, literal.x, +; EG-NEXT: OR_INT * T35.W, PV.Z, PV.W, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T21.X, PV.W, +; EG-NEXT: MOV * T36.X, T16.X, +; EG-NEXT: MOV * T36.Z, T12.X, +; EG-NEXT: MOV T37.X, T8.X, +; EG-NEXT: MOV T37.Z, T4.X, BS:VEC_120/SCL_212 +; EG-NEXT: MOV * T38.X, T32.X, +; EG-NEXT: MOV * T38.Z, T28.X, +; EG-NEXT: MOV T35.X, T24.X, +; EG-NEXT: MOV * T35.Z, T20.X, BS:VEC_120/SCL_212 ; ; GFX12-LABEL: constant_sextload_v32i8_to_v32i16: ; GFX12: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll index 3753737..09d3c3b 100644 --- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll +++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll @@ -263,63 +263,74 @@ define amdgpu_kernel void @global_load_v3i16(ptr addrspace(1) %out, ptr addrspac ; ; EG-LABEL: global_load_v3i16: ; EG: ; %bb.0: ; %entry -; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 1 @6 -; EG-NEXT: ALU 14, @11, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T3.X, 0 -; EG-NEXT: MEM_RAT MSKOR T2.XW, T0.X +; EG-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 2 @6 +; EG-NEXT: ALU 19, @13, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.X, T7.X, 0 +; EG-NEXT: MEM_RAT MSKOR T5.XW, T8.X ; EG-NEXT: CF_END ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_16 T1.X, T0.X, 0, #1 -; EG-NEXT: VTX_READ_16 T0.X, T0.X, 4, #1 -; EG-NEXT: ALU clause starting at 10: -; EG-NEXT: MOV * T0.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 11: +; EG-NEXT: VTX_READ_16 T6.X, T5.X, 0, #1 +; EG-NEXT: VTX_READ_16 T7.X, T5.X, 2, #1 +; EG-NEXT: VTX_READ_16 T5.X, T5.X, 
4, #1 +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: MOV * T5.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 13: ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, ; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00) ; EG-NEXT: AND_INT T1.W, PV.W, literal.x, -; EG-NEXT: AND_INT * T2.W, T0.X, literal.y, +; EG-NEXT: AND_INT * T2.W, T5.X, literal.y, ; EG-NEXT: 3(4.203895e-45), 65535(9.183409e-41) ; EG-NEXT: LSHL * T1.W, PV.W, literal.x, ; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00) -; EG-NEXT: LSHL T2.X, T2.W, PV.W, -; EG-NEXT: LSHL * T2.W, literal.x, PV.W, +; EG-NEXT: LSHL T5.X, T2.W, PV.W, +; EG-NEXT: LSHL * T5.W, literal.x, PV.W, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: MOV T2.Y, 0.0, -; EG-NEXT: MOV * T2.Z, 0.0, -; EG-NEXT: LSHR T0.X, T0.W, literal.x, -; EG-NEXT: LSHR * T3.X, KC0[2].Y, literal.x, +; EG-NEXT: MOV T5.Y, 0.0, +; EG-NEXT: MOV * T5.Z, 0.0, +; EG-NEXT: LSHR T8.X, T0.W, literal.x, +; EG-NEXT: LSHL T0.W, T7.X, literal.y, +; EG-NEXT: AND_INT * T1.W, T6.X, literal.z, +; EG-NEXT: 2(2.802597e-45), 16(2.242078e-44) +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT T6.X, PV.W, PS, +; EG-NEXT: LSHR * T7.X, KC0[2].Y, literal.x, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) ; ; CM-LABEL: global_load_v3i16: ; CM: ; %bb.0: ; %entry -; CM-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[] -; CM-NEXT: TEX 1 @6 -; CM-NEXT: ALU 15, @11, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT MSKOR T2.XW, T3.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T1.X, T0.X +; CM-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] +; CM-NEXT: TEX 2 @6 +; CM-NEXT: ALU 19, @13, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT MSKOR T5.XW, T8.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T6.X, T7.X ; CM-NEXT: CF_END ; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_16 T1.X, T0.X, 0, #1 -; CM-NEXT: VTX_READ_16 T0.X, T0.X, 4, #1 -; CM-NEXT: ALU clause starting at 10: -; CM-NEXT: MOV * T0.X, KC0[2].Z, -; CM-NEXT: ALU clause starting at 11: +; CM-NEXT: VTX_READ_16 T6.X, T5.X, 0, #1 +; CM-NEXT: VTX_READ_16 T7.X, T5.X, 2, #1 +; CM-NEXT: VTX_READ_16 T5.X, T5.X, 4, #1 +; CM-NEXT: ALU clause starting at 12: +; CM-NEXT: MOV * T5.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 13: ; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, ; CM-NEXT: 4(5.605194e-45), 0(0.000000e+00) ; CM-NEXT: AND_INT * T1.W, PV.W, literal.x, ; CM-NEXT: 3(4.203895e-45), 0(0.000000e+00) -; CM-NEXT: AND_INT T0.Z, T0.X, literal.x, +; CM-NEXT: AND_INT T0.Z, T5.X, literal.x, ; CM-NEXT: LSHL * T1.W, PV.W, literal.y, ; CM-NEXT: 65535(9.183409e-41), 3(4.203895e-45) -; CM-NEXT: LSHL T2.X, PV.Z, PV.W, -; CM-NEXT: LSHL * T2.W, literal.x, PV.W, +; CM-NEXT: LSHL T5.X, PV.Z, PV.W, +; CM-NEXT: LSHL * T5.W, literal.x, PV.W, ; CM-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; CM-NEXT: MOV T2.Y, 0.0, -; CM-NEXT: MOV * T2.Z, 0.0, -; CM-NEXT: LSHR * T0.X, KC0[2].Y, literal.x, +; CM-NEXT: MOV T5.Y, 0.0, +; CM-NEXT: MOV * T5.Z, 0.0, +; CM-NEXT: LSHL T0.Z, T7.X, literal.x, +; CM-NEXT: AND_INT * T1.W, T6.X, literal.y, BS:VEC_120/SCL_212 +; CM-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T6.X, PV.Z, PV.W, +; CM-NEXT: LSHR * T7.X, KC0[2].Y, literal.x, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) -; CM-NEXT: LSHR * T3.X, T0.W, literal.x, +; CM-NEXT: LSHR * T8.X, T0.W, literal.x, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) entry: %ld = load <3 x i16>, ptr addrspace(1) %in @@ -1738,8 +1749,8 @@ define amdgpu_kernel void @global_sextload_v4i16_to_v4i32(ptr addrspace(1) %out, ; GCN-NOHSA-SI-NEXT: s_mov_b32 s4, s0 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s5, s1 ; GCN-NOHSA-SI-NEXT: 
s_waitcnt vmcnt(0) -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 16, v4 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v3, 16, v5 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 16, v4 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v5, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v4, 0, 16 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 @@ -1758,8 +1769,8 @@ define amdgpu_kernel void @global_sextload_v4i16_to_v4i32(ptr addrspace(1) %out, ; GCN-HSA-NEXT: v_mov_b32_e32 v6, s0 ; GCN-HSA-NEXT: v_mov_b32_e32 v7, s1 ; GCN-HSA-NEXT: s_waitcnt vmcnt(0) -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 16, v4 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 16, v5 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 16, v4 ; GCN-HSA-NEXT: v_bfe_i32 v2, v5, 0, 16 ; GCN-HSA-NEXT: v_bfe_i32 v0, v4, 0, 16 ; GCN-HSA-NEXT: flat_store_dwordx4 v[6:7], v[0:3] @@ -6365,8 +6376,8 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out, ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v5, 31, v4 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v7, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v6, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v6, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 31, v6 @@ -6390,28 +6401,28 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out, ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v19, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v18, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 ; GCN-HSA-NEXT: v_mov_b32_e32 v17, s1 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v16, s0 -; GCN-HSA-NEXT: s_add_u32 s0, s0, 32 -; GCN-HSA-NEXT: v_mov_b32_e32 v21, s3 +; GCN-HSA-NEXT: s_add_u32 s0, s0, 16 ; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v20, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v23, s1 +; GCN-HSA-NEXT: v_mov_b32_e32 v21, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v22, s0 +; GCN-HSA-NEXT: v_mov_b32_e32 v20, s2 ; GCN-HSA-NEXT: s_waitcnt vmcnt(0) ; GCN-HSA-NEXT: v_mov_b32_e32 v8, v3 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v5, 16, v2 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v9, 16, v2 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v5, 16, v0 ; GCN-HSA-NEXT: v_bfe_i32 v4, v1, 0, 16 ; GCN-HSA-NEXT: v_bfe_i32 v0, v0, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v1 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v6, 16, v1 ; GCN-HSA-NEXT: v_bfe_i32 v12, v2, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v2, v9, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v14, v5, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v2, v5, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4 +; GCN-HSA-NEXT: v_bfe_i32 v14, v9, 0, 16 ; GCN-HSA-NEXT: v_bfe_i32 v8, v8, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v3 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v3 @@ -6420,9 +6431,9 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out, ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 -; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[4:7] +; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[4:7] ; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11] -; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[12:15] +; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15] ; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3] ; GCN-HSA-NEXT: s_endpgm ; @@ -6964,58 +6975,59 @@ define amdgpu_kernel void 
@global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NOHSA-SI-NEXT: s_mov_b32 s8, s6 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s9, s7 -; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 -; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16 +; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:16 +; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(1) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, v3 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v13, 16, v0 ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, v7 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, v3 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v13, 16, v4 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v17, 16, v2 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v18, 16, v0 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v0, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v11, 31, v1 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 16, v1 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v1, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v15, 31, v3 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 16, v3 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v16, v2, 0, 16 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, v7 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v17, 16, v6 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v18, 16, v4 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v4, v4, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v11, 31, v5 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 16, v5 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v5, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v15, 31, v7 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 16, v7 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v16, v6, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v12, v12, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v20, v4, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v18, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v20, v0, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v18, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v18, v17, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v13, 0, 16 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v9, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v7 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v7 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v3 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v3 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:112 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v27, 31, v5 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v27, 31, v1 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 16, v5 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v24, v5, 0, 16 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v4, v6, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v1, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 16, v1 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v24, v1, 0, 16 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v1, 16, v2 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v2, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v1, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v5, 31, v4 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v17, 31, v16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v21, 31, v20 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v5, 31, v4 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 ; 
GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v24 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 31, v6 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v19, 31, v18 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 31, v6 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:80 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:16 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:32 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 ; GCN-NOHSA-SI-NEXT: s_endpgm ; ; GCN-HSA-LABEL: global_sextload_v16i16_to_v16i64: @@ -7037,31 +7049,31 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v15, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v14, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v17, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v16, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x70 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v19, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v18, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x50 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x70 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v21, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v20, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v23, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v22, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x50 ; GCN-HSA-NEXT: v_mov_b32_e32 v13, s1 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v12, s0 ; GCN-HSA-NEXT: s_add_u32 s0, s0, 64 -; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v25, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v27, s1 +; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v24, s2 +; GCN-HSA-NEXT: v_mov_b32_e32 v27, s1 ; GCN-HSA-NEXT: v_mov_b32_e32 v26, s0 ; GCN-HSA-NEXT: s_waitcnt vmcnt(1) ; GCN-HSA-NEXT: v_bfe_i32 v8, v5, 0, 16 @@ -7069,36 +7081,36 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v5 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, v7 -; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[8:11] -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v16, 16, v6 +; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11] +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v18, 16, v6 ; GCN-HSA-NEXT: v_bfe_i32 v8, v5, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v7 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v7 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v17, 16, v4 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v19, 16, v4 ; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[8:11] ; GCN-HSA-NEXT: v_bfe_i32 v7, v6, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v9, v16, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v9, v18, 0, 16 ; 
GCN-HSA-NEXT: v_bfe_i32 v4, v4, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v8, 31, v7 -; GCN-HSA-NEXT: v_bfe_i32 v6, v17, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v6, v19, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 31, v9 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4 -; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[7:10] +; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[7:10] ; GCN-HSA-NEXT: s_waitcnt vmcnt(3) ; GCN-HSA-NEXT: v_mov_b32_e32 v15, v3 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v6 ; GCN-HSA-NEXT: v_lshrrev_b32_e32 v14, 16, v2 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v16, 16, v0 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v18, 16, v0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[12:13], v[4:7] ; GCN-HSA-NEXT: v_bfe_i32 v0, v0, 0, 16 ; GCN-HSA-NEXT: v_bfe_i32 v4, v1, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v1 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v6, 16, v1 ; GCN-HSA-NEXT: v_bfe_i32 v12, v2, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v2, v16, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v14, v14, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v2, v18, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4 +; GCN-HSA-NEXT: v_bfe_i32 v14, v14, 0, 16 ; GCN-HSA-NEXT: v_bfe_i32 v8, v15, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v3 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v3 @@ -7107,9 +7119,9 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 -; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[4:7] -; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11] -; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[12:15] +; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[4:7] +; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[8:11] +; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[12:15] ; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[0:3] ; GCN-HSA-NEXT: s_endpgm ; @@ -8100,113 +8112,115 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou ; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NOHSA-SI-NEXT: s_mov_b32 s8, s6 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s9, s7 -; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[12:15], off, s[8:11], 0 -; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[8:11], off, s[8:11], 0 offset:16 -; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:32 -; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:48 +; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[12:15], off, s[8:11], 0 offset:48 +; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:32 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5 +; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[8:11], off, s[8:11], 0 +; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(3) +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v20, 16, v14 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, v15 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v21, 16, v12 +; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(2) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v27, v3 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v16, 16, v0 ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, v3 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v26, v7 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v27, v11 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v21, v15 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v20, 16, v2 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v16, 16, v4 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v17, 16, v10 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v18, 16, v8 -; GCN-NOHSA-SI-NEXT: 
v_lshrrev_b32_e32 v19, 16, v14 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v22, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v3 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v3 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:240 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v17, 16, v6 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, v7 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v18, 16, v4 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v19, 16, v10 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v23, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v15 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v15 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:240 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v1 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v1 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v1, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:208 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v3, 16, v12 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v13 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v13 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v13, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:208 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, v11 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v27, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v3 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v3 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:176 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v1 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v1 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v1, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:144 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v22, 0, 16 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v26, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v7 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v7 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:176 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:112 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v5 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v5 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v5, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:144 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:80 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v11 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v11 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v27, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v13, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:112 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:48 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v12, 0, 
16 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v9 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v9 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v9, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v14, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v21, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v24, v20, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v3, v1, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v1, v8, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:80 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v15 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 16, v15 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v21, v21, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v22, 31, v21 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:48 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v13 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 16, v13 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v21, v13, 0, 16 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v22, 31, v21 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v1, v12, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v5, v14, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v3, v3, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v20, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v2, 0, 16 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v24 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:224 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v8, 31, v9 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 16, v9 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v5, v9, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v9, v10, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:224 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v9, v8, 0, 16 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:192 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v10, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v7, v19, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v18, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v15, v17, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v19, v16, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v17, v4, 0, 16 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v2, 16, v6 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v21, v6, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v2, 0, 16 -; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v25, v0, 0, 16 -; GCN-NOHSA-SI-NEXT: v_bfe_i32 v27, v2, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v4, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v20, v6, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v19, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v15, v18, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v17, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v26, v16, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v24, v0, 0, 16 +; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v0, 16, v2 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v28, v2, 0, 16 +; GCN-NOHSA-SI-NEXT: v_bfe_i32 v30, v0, 0, 16 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v18, 31, v17 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v22, 31, v21 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v25 +; 
GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v21, 31, v20 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v24 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v29, 31, v28 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v8, 31, v7 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 ; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v16, 31, v15 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v20, 31, v19 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v28, 31, v27 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[25:28], off, s[0:3], 0 offset:192 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:160 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:128 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:96 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:64 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[5:8], off, s[0:3], 0 offset:32 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v27, 31, v26 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v31, 31, v30 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[28:31], off, s[0:3], 0 offset:160 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:128 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:96 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:64 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:32 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[1:4], off, s[0:3], 0 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[5:8], off, s[0:3], 0 offset:16 ; GCN-NOHSA-SI-NEXT: s_endpgm ; ; GCN-HSA-LABEL: global_sextload_v32i16_to_v32i64: @@ -8218,180 +8232,179 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou ; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0) ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCN-HSA-NEXT: flat_load_dwordx4 v[4:7], v[0:1] +; GCN-HSA-NEXT: flat_load_dwordx4 v[12:15], v[0:1] ; GCN-HSA-NEXT: s_add_u32 s4, s2, 48 ; GCN-HSA-NEXT: s_addc_u32 s5, s3, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5 -; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1] ; GCN-HSA-NEXT: s_add_u32 s4, s2, 32 ; GCN-HSA-NEXT: s_addc_u32 s5, s3, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s5 -; GCN-HSA-NEXT: v_mov_b32_e32 v8, s4 -; GCN-HSA-NEXT: flat_load_dwordx4 v[8:11], v[8:9] ; GCN-HSA-NEXT: s_add_u32 s2, s2, 16 ; GCN-HSA-NEXT: s_addc_u32 s3, s3, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v13, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v12, s2 -; GCN-HSA-NEXT: flat_load_dwordx4 v[12:15], v[12:13] -; GCN-HSA-NEXT: s_add_u32 s2, s0, 48 -; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v21, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v20, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 +; GCN-HSA-NEXT: flat_load_dwordx4 v[8:11], v[0:1] +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 +; GCN-HSA-NEXT: flat_load_dwordx4 v[4:7], v[4:5] +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5 +; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1] +; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v23, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v22, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xf0 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 48 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v25, s3 ; GCN-HSA-NEXT: 
v_mov_b32_e32 v24, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xd0 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v27, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v26, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xb0 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xe0 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v21, s1 +; GCN-HSA-NEXT: v_mov_b32_e32 v20, s0 ; GCN-HSA-NEXT: s_waitcnt vmcnt(3) -; GCN-HSA-NEXT: v_bfe_i32 v16, v5, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v5 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v5 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, v7 -; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[16:19] -; GCN-HSA-NEXT: v_mov_b32_e32 v23, s3 -; GCN-HSA-NEXT: v_bfe_i32 v16, v5, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v7 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v7 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16 -; GCN-HSA-NEXT: v_mov_b32_e32 v22, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x90 -; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[16:19] -; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: s_waitcnt vmcnt(4) -; GCN-HSA-NEXT: v_bfe_i32 v16, v1, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v1 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v1 +; GCN-HSA-NEXT: v_bfe_i32 v16, v13, 0, 16 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v13 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v13 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, v3 -; GCN-HSA-NEXT: v_mov_b32_e32 v21, s3 ; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[16:19] -; GCN-HSA-NEXT: v_mov_b32_e32 v20, s2 -; GCN-HSA-NEXT: v_bfe_i32 v16, v1, 0, 16 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x70 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v3 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v3 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16 -; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[16:19] ; GCN-HSA-NEXT: v_mov_b32_e32 v27, s3 -; GCN-HSA-NEXT: s_waitcnt vmcnt(5) -; GCN-HSA-NEXT: v_bfe_i32 v16, v9, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v9 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v9 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, v11 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v13, 16, v14 ; GCN-HSA-NEXT: v_mov_b32_e32 v26, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x50 -; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[16:19] +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xf0 +; GCN-HSA-NEXT: v_bfe_i32 v18, v13, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v16, v14, 0, 16 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: v_bfe_i32 v16, v5, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v11 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v11 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16 -; GCN-HSA-NEXT: v_mov_b32_e32 v25, s3 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v18 ; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[16:19] +; GCN-HSA-NEXT: v_mov_b32_e32 v23, s3 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, v15 +; GCN-HSA-NEXT: v_mov_b32_e32 v22, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xc0 +; GCN-HSA-NEXT: v_bfe_i32 v13, v13, 0, 16 +; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v16, 31, v15 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 16, v15 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v14, 31, v13 +; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[13:16] +; GCN-HSA-NEXT: v_mov_b32_e32 v25, s3 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v14, 16, v12 ; GCN-HSA-NEXT: v_mov_b32_e32 v24, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xd0 +; GCN-HSA-NEXT: 
v_bfe_i32 v12, v12, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v14, v14, 0, 16 +; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14 +; GCN-HSA-NEXT: v_mov_b32_e32 v17, s3 +; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15] +; GCN-HSA-NEXT: v_mov_b32_e32 v16, s2 ; GCN-HSA-NEXT: s_waitcnt vmcnt(6) -; GCN-HSA-NEXT: v_bfe_i32 v16, v13, 0, 16 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v13 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v13 +; GCN-HSA-NEXT: v_bfe_i32 v12, v9, 0, 16 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v28, 16, v10 +; GCN-HSA-NEXT: v_mov_b32_e32 v29, v11 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v18, 16, v8 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v9 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v14, 16, v9 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12 +; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[12:15] +; GCN-HSA-NEXT: v_bfe_i32 v16, v29, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v12, v8, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v14, v18, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v8, v10, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v10, v28, 0, 16 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xa0 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v11 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v11 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 +; GCN-HSA-NEXT: s_waitcnt vmcnt(5) +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v20, 16, v2 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v10 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[16:19] -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v22, 31, v15 -; GCN-HSA-NEXT: v_bfe_i32 v18, v1, 0, 16 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, v15 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12 +; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11] +; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[16:19] +; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[12:15] +; GCN-HSA-NEXT: v_bfe_i32 v8, v2, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 v10, v20, 0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v15, s3 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v10 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xb0 +; GCN-HSA-NEXT: v_mov_b32_e32 v21, v3 +; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[8:11] +; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v3 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v3 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2 +; GCN-HSA-NEXT: v_bfe_i32 v8, v21, 0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x80 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8 +; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v26, 16, v0 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v18, 16, v6 +; GCN-HSA-NEXT: v_lshrrev_b32_e32 v27, 16, v4 +; GCN-HSA-NEXT: v_bfe_i32 v12, v4, 0, 16 ; GCN-HSA-NEXT: v_bfe_i32 v16, v6, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v19, v1, 0, 16 -; GCN-HSA-NEXT: v_mov_b32_e32 v6, s3 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v21, 16, v15 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v20, 31, v19 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xe0 +; GCN-HSA-NEXT: flat_store_dwordx4 v[2:3], v[8:11] +; GCN-HSA-NEXT: v_bfe_i32 v6, v5, 0, 16 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v5 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v8, 16, v5 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 +; GCN-HSA-NEXT: v_bfe_i32 v24, v0, 0, 16 +; GCN-HSA-NEXT: v_bfe_i32 
v26, v26, 0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x90 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v25, 31, v24 +; GCN-HSA-NEXT: v_bfe_i32 v14, v27, 0, 16 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v27, 31, v26 +; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[24:27] +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 +; GCN-HSA-NEXT: v_bfe_i32 v20, v1, 0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v23, 31, v1 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v22, 16, v1 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v21, 31, v20 +; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[20:23] +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 +; GCN-HSA-NEXT: v_mov_b32_e32 v19, v7 +; GCN-HSA-NEXT: v_bfe_i32 v18, v18, 0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x70 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v9, 16, v2 -; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[19:22] -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v25, 16, v0 +; GCN-HSA-NEXT: v_bfe_i32 v0, v19, 0, 16 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v18 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v21, 16, v10 -; GCN-HSA-NEXT: flat_store_dwordx4 v[5:6], v[16:19] -; GCN-HSA-NEXT: v_bfe_i32 v23, v2, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v19, v10, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v2, v25, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v25, v9, 0, 16 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s3 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v3, 16, v4 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xc0 -; GCN-HSA-NEXT: v_bfe_i32 v13, v4, 0, 16 -; GCN-HSA-NEXT: v_bfe_i32 v15, v3, 0, 16 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s1 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v24, 31, v23 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v26, 31, v25 -; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v1, 16, v14 -; GCN-HSA-NEXT: v_bfe_i32 v11, v14, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v14, 31, v13 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v16, 31, v15 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s0 -; GCN-HSA-NEXT: v_bfe_i32 v0, v0, 0, 16 -; GCN-HSA-NEXT: flat_store_dwordx4 v[9:10], v[23:26] -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s3 -; GCN-HSA-NEXT: flat_store_dwordx4 v[3:4], v[13:16] -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v2 -; GCN-HSA-NEXT: v_bfe_i32 v13, v1, 0, 16 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[16:19] +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v7 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v2, 16, v7 ; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xa0 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v22, 16, v8 -; GCN-HSA-NEXT: v_bfe_i32 v21, v21, 0, 16 -; GCN-HSA-NEXT: flat_store_dwordx4 v[9:10], v[0:3] -; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v20, 31, v19 -; GCN-HSA-NEXT: v_bfe_i32 v17, v22, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v22, 31, v21 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x80 -; GCN-HSA-NEXT: v_bfe_i32 v15, v8, 0, 16 -; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[19:22] -; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v16, 31, v15 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 31, v17 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60 -; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], 
v[15:18] +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 64 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GCN-HSA-NEXT: v_lshrrev_b32_e32 v7, 16, v12 -; GCN-HSA-NEXT: v_bfe_i32 v5, v12, 0, 16 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v12, 31, v11 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v14, 31, v13 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCN-HSA-NEXT: s_add_u32 s0, s0, 64 -; GCN-HSA-NEXT: v_bfe_i32 v7, v7, 0, 16 -; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[11:14] +; GCN-HSA-NEXT: s_add_u32 s0, s0, 0x50 +; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[12:15] ; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s0 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v6, 31, v5 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v8, 31, v7 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v6 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s1 -; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[5:8] +; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[6:9] ; GCN-HSA-NEXT: s_endpgm ; ; GCN-NOHSA-VI-LABEL: global_sextload_v32i16_to_v32i64: diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll index 5bc02c4..f879dc6 100644 --- a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll +++ b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll @@ -6274,12 +6274,12 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out, ; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v0 ; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v0, 0, 8 ; GCN-NOHSA-SI-NEXT: s_lshr_b32 s6, s4, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s8, s4, 8 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s5, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s5, 24 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s5, 8 -; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s8, s5, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s5, 24 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s5, 8 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s4, 8 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[4:5], 0x80000 +; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NOHSA-SI-NEXT: s_ashr_i32 s15, s4, 31 ; GCN-NOHSA-SI-NEXT: s_ashr_i32 s18, s4, 24 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x80000 @@ -6294,19 +6294,19 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out, ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s6 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s7 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:48 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s8 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s9 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:32 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s10 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s11 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s12 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s13 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s8 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s9 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s10 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s11 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s4 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s12 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s13 ; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GCN-NOHSA-SI-NEXT: 
v_mov_b32_e32 v8, s4 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s5 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:32 ; GCN-NOHSA-SI-NEXT: s_endpgm ; ; GCN-HSA-LABEL: global_sextload_v8i8_to_v8i64: @@ -6325,11 +6325,12 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out, ; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v1 ; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v0 ; GCN-HSA-NEXT: s_lshr_b32 s4, s2, 16 -; GCN-HSA-NEXT: s_lshr_b32 s6, s2, 8 -; GCN-HSA-NEXT: s_lshr_b32 s8, s3, 16 -; GCN-HSA-NEXT: s_lshr_b32 s10, s3, 24 -; GCN-HSA-NEXT: s_lshr_b32 s12, s3, 8 +; GCN-HSA-NEXT: s_lshr_b32 s6, s3, 16 +; GCN-HSA-NEXT: s_lshr_b32 s8, s3, 24 +; GCN-HSA-NEXT: s_lshr_b32 s10, s3, 8 +; GCN-HSA-NEXT: s_lshr_b32 s12, s2, 8 ; GCN-HSA-NEXT: s_ashr_i32 s13, s2, 31 +; GCN-HSA-NEXT: v_bfe_i32 v0, v0, 0, 8 ; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[2:3], 0x80000 ; GCN-HSA-NEXT: s_ashr_i32 s16, s2, 24 ; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[12:13], 0x80000 @@ -6337,38 +6338,37 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out, ; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 +; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4 ; GCN-HSA-NEXT: s_add_u32 s4, s0, 48 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5 ; GCN-HSA-NEXT: s_addc_u32 s5, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2 -; GCN-HSA-NEXT: v_mov_b32_e32 v19, s5 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 +; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3] ; GCN-HSA-NEXT: v_mov_b32_e32 v6, s16 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4 ; GCN-HSA-NEXT: v_mov_b32_e32 v7, s13 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v18, s4 +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s2 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s3 +; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[4:7] ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[4:7] -; GCN-HSA-NEXT: s_add_u32 s0, s0, 16 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s6 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s7 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s8 +; GCN-HSA-NEXT: v_mov_b32_e32 v15, s9 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 +; GCN-HSA-NEXT: s_add_u32 s0, s0, 32 +; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[12:15] +; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s0 ; GCN-HSA-NEXT: v_mov_b32_e32 v8, s14 ; GCN-HSA-NEXT: v_mov_b32_e32 v9, s15 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 -; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[8:11] -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1 -; GCN-HSA-NEXT: v_bfe_i32 v0, v0, 0, 8 -; GCN-HSA-NEXT: v_mov_b32_e32 v12, s8 -; GCN-HSA-NEXT: v_mov_b32_e32 v13, s9 -; GCN-HSA-NEXT: v_mov_b32_e32 v14, s10 -; GCN-HSA-NEXT: v_mov_b32_e32 v15, s11 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0 -; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[12:15] -; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3] +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s1 +; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[8:11] ; GCN-HSA-NEXT: s_endpgm ; ; GCN-NOHSA-VI-LABEL: global_sextload_v8i8_to_v8i64: @@ -6388,10 +6388,10 @@ define 
amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out, ; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v1 ; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v0 ; GCN-NOHSA-VI-NEXT: s_lshr_b32 s6, s4, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s8, s4, 8 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s5, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s12, s5, 24 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s5, 8 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s8, s5, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s5, 24 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s12, s5, 8 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s4, 8 ; GCN-NOHSA-VI-NEXT: s_ashr_i32 s15, s4, 31 ; GCN-NOHSA-VI-NEXT: s_ashr_i32 s18, s4, 24 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 @@ -6408,18 +6408,18 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out, ; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s16 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s17 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s8 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s9 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v12, s10 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, s11 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s12 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s13 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s4 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s5 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v12, s8 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, s9 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s10 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s11 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s12 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s13 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s4 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s5 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:48 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32 ; GCN-NOHSA-VI-NEXT: s_endpgm ; ; EG-LABEL: global_sextload_v8i8_to_v8i64: @@ -6934,84 +6934,85 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out ; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5 ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v3 ; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s4, v2 -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s9, v1 -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s8, v0 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s5, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s5, 8 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s14, s5 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s4, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s4, 24 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s4, 8 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s9, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s9, 8 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s26, s9 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s8, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s8, 24 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s8, 8 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[8:9], 0x80000 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v3 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s6, v0 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s7, v1 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s4, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s4, 24 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s4, 8 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s6, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s6, 24 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s6, 8 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, 
s5, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s5, 8 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s26, s5 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s7, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s7, 8 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s8, s7 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[4:5], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s29, s7, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s31, s7, 24 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s31, s9, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s9, 24 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s35, s5, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s5, 31 ; GCN-NOHSA-SI-NEXT: s_ashr_i32 s38, s5, 24 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[34:35], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[30:31], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s38 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s35 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s14 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s15 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s36 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s37 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s33 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s31 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s26 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s27 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s10 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s11 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s36 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s37 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s34 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s35 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s38 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s33 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s26 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s27 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s12 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s13 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s14 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s15 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s12 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s13 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s31 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s29 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s16 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s17 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, 
s18 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s19 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:80 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s20 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s21 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s18 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s19 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s20 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s21 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s22 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s23 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s24 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s25 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:32 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s28 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s29 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s8 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s9 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s8 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s9 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s22 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s23 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s24 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s25 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s10 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s11 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s6 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s7 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s4 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 ; GCN-NOHSA-SI-NEXT: s_endpgm ; ; GCN-HSA-LABEL: global_sextload_v16i8_to_v16i64: @@ -7024,41 +7025,41 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 ; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GCN-HSA-NEXT: v_mov_b32_e32 v17, s1 -; GCN-HSA-NEXT: v_mov_b32_e32 v16, s0 +; GCN-HSA-NEXT: v_mov_b32_e32 v9, s1 +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s0 ; GCN-HSA-NEXT: s_waitcnt vmcnt(0) -; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v3 ; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v2 -; GCN-HSA-NEXT: v_readfirstlane_b32 s5, v1 +; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v3 ; GCN-HSA-NEXT: v_readfirstlane_b32 s4, v0 -; GCN-HSA-NEXT: s_lshr_b32 s6, s3, 16 -; GCN-HSA-NEXT: s_lshr_b32 s8, s3, 8 -; GCN-HSA-NEXT: s_mov_b32 s10, s3 -; GCN-HSA-NEXT: s_lshr_b32 s12, s2, 16 -; GCN-HSA-NEXT: s_lshr_b32 s14, s2, 24 -; GCN-HSA-NEXT: s_lshr_b32 s16, s2, 8 +; GCN-HSA-NEXT: v_readfirstlane_b32 s5, v1 +; GCN-HSA-NEXT: s_lshr_b32 s6, s2, 16 +; GCN-HSA-NEXT: s_lshr_b32 s8, s2, 24 +; GCN-HSA-NEXT: s_lshr_b32 s10, s2, 8 +; GCN-HSA-NEXT: s_lshr_b32 s18, s3, 16 +; GCN-HSA-NEXT: s_lshr_b32 s20, s3, 8 +; GCN-HSA-NEXT: s_mov_b32 s22, s3 ; GCN-HSA-NEXT: s_ashr_i32 s7, s3, 31 ; GCN-HSA-NEXT: s_ashr_i32 s9, s3, 24 ; GCN-HSA-NEXT: s_bfe_i64 s[2:3], 
s[2:3], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[4:5], 0x80000 -; GCN-HSA-NEXT: s_lshr_b32 s18, s5, 16 -; GCN-HSA-NEXT: s_lshr_b32 s20, s5, 8 -; GCN-HSA-NEXT: s_mov_b32 s22, s5 +; GCN-HSA-NEXT: s_lshr_b32 s12, s4, 16 +; GCN-HSA-NEXT: s_lshr_b32 s14, s4, 24 +; GCN-HSA-NEXT: s_lshr_b32 s16, s4, 8 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCN-HSA-NEXT: s_lshr_b32 s2, s4, 16 +; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[4:5], 0x80000 +; GCN-HSA-NEXT: s_ashr_i32 s4, s5, 24 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3 +; GCN-HSA-NEXT: s_lshr_b32 s2, s5, 16 ; GCN-HSA-NEXT: s_ashr_i32 s3, s5, 31 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7 -; GCN-HSA-NEXT: s_ashr_i32 s7, s5, 24 -; GCN-HSA-NEXT: v_mov_b32_e32 v8, s24 -; GCN-HSA-NEXT: s_lshr_b32 s24, s4, 24 -; GCN-HSA-NEXT: s_lshr_b32 s4, s4, 8 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s9 -; GCN-HSA-NEXT: v_mov_b32_e32 v6, s7 -; GCN-HSA-NEXT: v_mov_b32_e32 v7, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s25 -; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s4 +; GCN-HSA-NEXT: s_lshr_b32 s4, s5, 8 +; GCN-HSA-NEXT: s_mov_b32 s24, s5 +; GCN-HSA-NEXT: v_mov_b32_e32 v6, s9 +; GCN-HSA-NEXT: v_mov_b32_e32 v7, s7 +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s3 ; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 @@ -7069,66 +7070,66 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out ; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s6 -; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x70 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s7 -; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6 -; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x60 -; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[2:5] -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s16 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s17 +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s6 ; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x50 +; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[2:5] +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s7 ; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v15, s7 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9 -; GCN-HSA-NEXT: v_mov_b32_e32 v14, s6 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7 ; GCN-HSA-NEXT: s_add_u32 s6, s0, 64 -; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[2:5] -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s12 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s13 -; GCN-HSA-NEXT: v_mov_b32_e32 v12, s14 -; GCN-HSA-NEXT: v_mov_b32_e32 v13, s15 ; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[10:13] -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s16 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s17 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6 -; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[0:3] -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s18 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 48 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 +; 
GCN-HSA-NEXT: v_mov_b32_e32 v9, s7 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11 +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s6 +; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[0:3] +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 16 +; GCN-HSA-NEXT: v_mov_b32_e32 v9, s3 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v19, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v18, s2 -; GCN-HSA-NEXT: s_add_u32 s2, s0, 32 +; GCN-HSA-NEXT: v_mov_b32_e32 v17, s3 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s8 +; GCN-HSA-NEXT: v_mov_b32_e32 v15, s9 +; GCN-HSA-NEXT: v_mov_b32_e32 v16, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x70 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[12:15] +; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s12 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s13 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s14 +; GCN-HSA-NEXT: v_mov_b32_e32 v15, s15 +; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[12:15] +; GCN-HSA-NEXT: v_mov_b32_e32 v17, s3 +; GCN-HSA-NEXT: v_mov_b32_e32 v16, s2 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s18 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s19 ; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[4:7] -; GCN-HSA-NEXT: s_add_u32 s0, s0, 16 +; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[4:7] +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s22 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v12, s22 -; GCN-HSA-NEXT: v_mov_b32_e32 v13, s23 -; GCN-HSA-NEXT: v_mov_b32_e32 v14, s20 -; GCN-HSA-NEXT: v_mov_b32_e32 v15, s21 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s23 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s20 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s21 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2 -; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[12:15] -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s24 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s25 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s4 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s5 +; GCN-HSA-NEXT: s_add_u32 s2, s0, 48 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[8:11] +; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 +; GCN-HSA-NEXT: s_add_u32 s0, s0, 32 +; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[8:11] +; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s0 +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s24 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s25 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s4 +; GCN-HSA-NEXT: v_mov_b32_e32 v15, s5 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s1 +; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[12:15] ; GCN-HSA-NEXT: s_endpgm ; ; GCN-NOHSA-VI-LABEL: global_sextload_v16i8_to_v16i64: @@ -7142,83 +7143,84 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_mov_b32 s8, s6 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s9, s7 ; GCN-NOHSA-VI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 -; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4 +; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5 ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v3 ; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v2 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s5, 16 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s9, v1 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s8, v0 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s18, s5, 8 -; GCN-NOHSA-VI-NEXT: s_mov_b32 s20, s5 -; 
GCN-NOHSA-VI-NEXT: s_lshr_b32 s22, s4, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s24, s4, 24 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s26, s4, 8 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s35, s5, 31 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s38, s5, 24 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s6, v0 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s4, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s4, 24 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v3 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s7, v1 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s18, s4, 8 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s20, s6, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s22, s6, 24 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s24, s6, 8 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s28, s9, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s12, s9, 8 -; GCN-NOHSA-VI-NEXT: s_mov_b32 s14, s9 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s8, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s30, s8, 24 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s34, s8, 8 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s26, s5, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s5, 8 +; GCN-NOHSA-VI-NEXT: s_mov_b32 s12, s5 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s8, s7, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s28, s7, 8 +; GCN-NOHSA-VI-NEXT: s_mov_b32 s30, s7 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[36:37], s[4:5], 0x80000 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s31, s9, 31 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s29, s7, 31 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s31, s7, 24 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s38 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s35 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s16 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s17 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[8:9], 0x80000 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s33, s9, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[34:35], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[30:31], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s14 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s15 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s16 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s17 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s33, s5, 31 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s38, s5, 24 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[30:31], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[28:29], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s36 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s37 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s20 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s21 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v16, s18 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, s19 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s26 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s22 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s23 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s24 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s25 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s27 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s33 -; 
GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s31 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v12, s6 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, s7 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s28 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s29 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:96 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v18, s12 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v16, s14 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, s15 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v19, s13 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s4 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s10 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s11 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s8 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s9 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s5 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s36 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s37 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s34 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s35 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s18 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s19 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:80 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v16, s20 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, s21 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v18, s22 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v19, s23 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s24 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s25 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s38 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s33 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s31 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s29 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s26 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s27 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v12, s8 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s12 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s13 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s10 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s11 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, s9 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:16 +; GCN-NOHSA-VI-NEXT: s_nop 0 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v16, s6 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, s7 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v18, s4 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v19, s5 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:112 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:32 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 ; GCN-NOHSA-VI-NEXT: s_endpgm ; ; EG-LABEL: global_sextload_v16i8_to_v16i64: @@ -8174,166 +8176,166 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out ; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5 ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(1) -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s39, v3 -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s38, v2 -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s37, 
v1 -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s36, v0 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s18, v2 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s19, v3 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s22, v0 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s23, v1 ; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s7, v7 -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s6, v6 -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s11, v5 -; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s10, v4 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s39, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s39, 8 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s42, s39 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s38, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s38, 24 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s38, 8 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s4, s37, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s8, s37, 8 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s40, s37 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s36, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s36, 24 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s36, 8 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s7, 16 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s7, 8 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s22, s7 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s5, s39, 31 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s9, s39, 24 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[44:45], s[38:39], 0x80000 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s24, v6 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s25, v7 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s12, v4 +; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s13, v5 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s18, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s18, 24 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s18, 8 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s22, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s22, 24 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s22, 8 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s4, s24, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s6, s24, 24 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s8, s24, 8 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s12, 16 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s12, 24 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s12, 8 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s19, 16 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[40:41], s[12:13], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[18:19], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[44:45], s[22:23], 0x80000 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s42 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s43 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s6, 16 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[24:25], 0x80000 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s44 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s45 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s42, s6, 24 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s9 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s9, s37, 31 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s5 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s5, s37, 24 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s19, 8 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s42 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s43 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s19 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s40 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s41 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s40, s23, 16 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s40 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s41 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s40, s6, 8 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s36 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s37 -; 
GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s11, 16 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s38 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s39 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s23, 8 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s36 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s37 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s22, s23 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[12:13], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s30 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s31 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s44, s11, 8 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:208 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s36 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s37 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s25, 16 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s34 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s35 -; GCN-NOHSA-SI-NEXT: s_mov_b32 s34, s11 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s25, 8 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s30 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s25, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s23, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s41, s23, 24 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s5, s19, 31 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s7, s19, 24 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s44, s25, 24 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s25 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:240 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s28 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s29 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s10, 16 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s26 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s27 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s10, 24 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s24 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s25 -; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s10, 8 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s28 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s29 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s13, 16 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s26 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s27 +; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s13, 8 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s5 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s5, s7, 31 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s9 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s9, s7, 24 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:208 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s9 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s11, 31 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s5 -; GCN-NOHSA-SI-NEXT: s_ashr_i32 s41, s11, 24 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[10:11], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[46:47], s[6:7], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[48:49], s[22:23], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[24:25], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 
s[10:11], s[26:27], 0x80000 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s7 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s45, s13, 31 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5 +; GCN-NOHSA-SI-NEXT: s_ashr_i32 s46, s13, 24 +; GCN-NOHSA-SI-NEXT: s_mov_b32 s30, s13 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[12:13], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[22:23], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[26:27], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[28:29], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[44:45], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[36:37], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[40:41], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[42:43], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[38:39], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[40:41], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 -; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 +; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 ; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:128 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s48 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s49 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s4 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s5 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s6 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s7 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:80 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s46 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s47 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s8 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s9 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:160 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s41 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s39 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s8 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s9 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s10 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s11 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s12 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s13 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s14 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s15 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:144 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s14 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s15 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:16 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s41 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s42 +; GCN-NOHSA-SI-NEXT: 
v_mov_b32_e32 v7, s43 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s16 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s17 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s44 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s33 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s16 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s17 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:128 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s34 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s35 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s18 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s19 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s30 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s31 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s20 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s21 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s20 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s21 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s38 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s39 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s34 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s35 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s18 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s19 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:224 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s46 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s45 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s36 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s37 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:176 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s36 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s37 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:80 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s30 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s31 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s28 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s29 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:160 ; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s28 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s29 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s26 ; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s27 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s24 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s25 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32 -; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(2) -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s22 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s23 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s10 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s11 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s6 -; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s7 -; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:112 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s24 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s25 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96 +; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1) +; GCN-NOHSA-SI-NEXT: 
v_mov_b32_e32 v10, s22 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s23 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s12 +; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s13 +; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32 ; GCN-NOHSA-SI-NEXT: s_endpgm ; ; GCN-HSA-LABEL: global_sextload_v32i8_to_v32i64: @@ -8346,225 +8348,223 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 ; GCN-HSA-NEXT: s_add_u32 s2, s2, 16 -; GCN-HSA-NEXT: flat_load_dwordx4 v[6:9], v[0:1] +; GCN-HSA-NEXT: flat_load_dwordx4 v[4:7], v[0:1] ; GCN-HSA-NEXT: s_addc_u32 s3, s3, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCN-HSA-NEXT: flat_load_dwordx4 v[2:5], v[0:1] +; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1] ; GCN-HSA-NEXT: s_waitcnt vmcnt(1) -; GCN-HSA-NEXT: v_readfirstlane_b32 s7, v9 -; GCN-HSA-NEXT: v_readfirstlane_b32 s6, v8 -; GCN-HSA-NEXT: v_readfirstlane_b32 s9, v7 -; GCN-HSA-NEXT: v_readfirstlane_b32 s8, v6 -; GCN-HSA-NEXT: s_lshr_b32 s16, s7, 16 -; GCN-HSA-NEXT: s_lshr_b32 s18, s7, 8 -; GCN-HSA-NEXT: s_mov_b32 s24, s7 -; GCN-HSA-NEXT: s_lshr_b32 s22, s6, 16 -; GCN-HSA-NEXT: s_lshr_b32 s14, s6, 24 -; GCN-HSA-NEXT: s_lshr_b32 s2, s6, 8 -; GCN-HSA-NEXT: s_lshr_b32 s4, s9, 16 -; GCN-HSA-NEXT: s_lshr_b32 s10, s9, 8 -; GCN-HSA-NEXT: s_mov_b32 s12, s9 -; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[6:7], 0x80000 -; GCN-HSA-NEXT: s_lshr_b32 s6, s8, 16 -; GCN-HSA-NEXT: s_lshr_b32 s28, s8, 24 -; GCN-HSA-NEXT: s_lshr_b32 s30, s8, 8 -; GCN-HSA-NEXT: s_ashr_i32 s43, s9, 31 -; GCN-HSA-NEXT: s_ashr_i32 s52, s9, 24 -; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[8:9], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[16:17], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[24:25], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 +; GCN-HSA-NEXT: v_readfirstlane_b32 s6, v6 +; GCN-HSA-NEXT: v_readfirstlane_b32 s8, v4 +; GCN-HSA-NEXT: v_readfirstlane_b32 s9, v5 +; GCN-HSA-NEXT: v_readfirstlane_b32 s7, v7 +; GCN-HSA-NEXT: s_lshr_b32 s20, s6, 16 +; GCN-HSA-NEXT: s_lshr_b32 s18, s6, 24 +; GCN-HSA-NEXT: s_lshr_b32 s10, s8, 16 +; GCN-HSA-NEXT: s_lshr_b32 s2, s8, 24 +; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[8:9], 0x80000 +; GCN-HSA-NEXT: s_lshr_b32 s16, s6, 8 +; GCN-HSA-NEXT: s_lshr_b32 s4, s8, 8 +; GCN-HSA-NEXT: s_lshr_b32 s12, s7, 16 +; GCN-HSA-NEXT: s_lshr_b32 s14, s7, 8 +; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[6:7], 0x80000 +; GCN-HSA-NEXT: s_lshr_b32 s6, s9, 16 +; GCN-HSA-NEXT: s_mov_b32 s28, s9 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s26 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s27 +; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[18:19], 0x80000 ; GCN-HSA-NEXT: s_waitcnt vmcnt(0) -; GCN-HSA-NEXT: v_readfirstlane_b32 s25, v5 -; GCN-HSA-NEXT: v_readfirstlane_b32 s24, v4 -; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 -; GCN-HSA-NEXT: v_readfirstlane_b32 s37, v3 -; GCN-HSA-NEXT: v_readfirstlane_b32 s36, v2 +; GCN-HSA-NEXT: v_readfirstlane_b32 s40, v2 +; GCN-HSA-NEXT: v_readfirstlane_b32 s41, v3 +; GCN-HSA-NEXT: s_bfe_i64 s[42:43], s[10:11], 0x80000 +; GCN-HSA-NEXT: v_readfirstlane_b32 s44, v0 +; GCN-HSA-NEXT: v_readfirstlane_b32 s45, v1 ; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s26 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s27 -; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[30:31], 0x80000 -; 
GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[28:29], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[44:45], s[12:13], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[40:41], s[10:11], 0x80000 +; GCN-HSA-NEXT: s_mov_b32 s22, s7 +; GCN-HSA-NEXT: s_lshr_b32 s8, s9, 8 +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s24 +; GCN-HSA-NEXT: v_mov_b32_e32 v9, s25 +; GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[16:17], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[28:29], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[6:7], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[14:15], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[12:13], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[46:47], s[4:5], 0x80000 -; GCN-HSA-NEXT: v_mov_b32_e32 v8, s8 -; GCN-HSA-NEXT: v_mov_b32_e32 v14, s18 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s22 -; GCN-HSA-NEXT: v_mov_b32_e32 v6, s14 +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s20 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s26 +; GCN-HSA-NEXT: v_mov_b32_e32 v15, s27 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s42 ; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2 +; GCN-HSA-NEXT: s_lshr_b32 s42, s44, 16 +; GCN-HSA-NEXT: s_lshr_b32 s48, s44, 24 +; GCN-HSA-NEXT: s_lshr_b32 s28, s44, 8 +; GCN-HSA-NEXT: s_lshr_b32 s6, s45, 16 +; GCN-HSA-NEXT: s_lshr_b32 s2, s45, 8 +; GCN-HSA-NEXT: s_mov_b32 s4, s45 +; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[44:45], 0x80000 +; GCN-HSA-NEXT: s_lshr_b32 s44, s40, 16 +; GCN-HSA-NEXT: s_lshr_b32 s50, s40, 24 +; GCN-HSA-NEXT: s_lshr_b32 s52, s40, 8 +; GCN-HSA-NEXT: s_lshr_b32 s20, s41, 16 +; GCN-HSA-NEXT: s_lshr_b32 s12, s41, 8 +; GCN-HSA-NEXT: s_mov_b32 s14, s41 +; GCN-HSA-NEXT: s_ashr_i32 s33, s9, 31 +; GCN-HSA-NEXT: s_ashr_i32 s37, s7, 31 +; GCN-HSA-NEXT: s_ashr_i32 s38, s7, 24 +; GCN-HSA-NEXT: s_ashr_i32 s34, s9, 24 +; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s21 +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s30 +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s31 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s43 ; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3 -; GCN-HSA-NEXT: s_lshr_b32 s14, s37, 16 -; GCN-HSA-NEXT: s_lshr_b32 s10, s37, 8 -; GCN-HSA-NEXT: s_mov_b32 s12, s37 -; GCN-HSA-NEXT: s_lshr_b32 s8, s36, 16 -; GCN-HSA-NEXT: s_lshr_b32 s6, s36, 24 -; GCN-HSA-NEXT: s_lshr_b32 s4, s36, 8 -; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[36:37], 0x80000 -; GCN-HSA-NEXT: s_lshr_b32 s38, s25, 16 -; GCN-HSA-NEXT: s_lshr_b32 s28, s25, 8 -; GCN-HSA-NEXT: s_mov_b32 s36, s25 -; GCN-HSA-NEXT: s_lshr_b32 s48, s24, 16 -; GCN-HSA-NEXT: s_lshr_b32 s22, s24, 24 -; GCN-HSA-NEXT: s_lshr_b32 s18, s24, 8 -; GCN-HSA-NEXT: s_ashr_i32 s50, s7, 31 -; GCN-HSA-NEXT: s_ashr_i32 s51, s7, 24 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s9 -; GCN-HSA-NEXT: v_mov_b32_e32 v12, s16 -; GCN-HSA-NEXT: v_mov_b32_e32 v13, s17 -; GCN-HSA-NEXT: v_mov_b32_e32 v15, s19 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s23 -; GCN-HSA-NEXT: v_mov_b32_e32 v7, s15 -; GCN-HSA-NEXT: s_ashr_i32 s33, s37, 31 -; GCN-HSA-NEXT: s_ashr_i32 s42, s37, 24 -; GCN-HSA-NEXT: s_ashr_i32 s53, s25, 31 -; GCN-HSA-NEXT: s_ashr_i32 s54, s25, 24 -; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[24:25], 0x80000 +; GCN-HSA-NEXT: s_ashr_i32 s30, s45, 31 +; GCN-HSA-NEXT: s_ashr_i32 s31, s45, 24 +; GCN-HSA-NEXT: s_ashr_i32 s35, s41, 31 +; GCN-HSA-NEXT: s_ashr_i32 s36, s41, 24 +; GCN-HSA-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 
s[12:13], s[12:13], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[48:49], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 ; GCN-HSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000 -; GCN-HSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000 -; GCN-HSA-NEXT: s_add_u32 s48, s0, 0x70 -; GCN-HSA-NEXT: s_addc_u32 s49, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v18, s48 -; GCN-HSA-NEXT: v_mov_b32_e32 v19, s49 -; GCN-HSA-NEXT: s_add_u32 s48, s0, 0x60 -; GCN-HSA-NEXT: s_addc_u32 s49, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v24, s48 -; GCN-HSA-NEXT: v_mov_b32_e32 v25, s49 -; GCN-HSA-NEXT: s_add_u32 s48, s0, 0x50 -; GCN-HSA-NEXT: s_addc_u32 s49, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v26, s48 -; GCN-HSA-NEXT: v_mov_b32_e32 v27, s49 -; GCN-HSA-NEXT: s_add_u32 s48, s0, 64 -; GCN-HSA-NEXT: s_addc_u32 s49, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v20, s44 -; GCN-HSA-NEXT: s_add_u32 s44, s0, 48 -; GCN-HSA-NEXT: v_mov_b32_e32 v21, s45 -; GCN-HSA-NEXT: s_addc_u32 s45, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v28, s48 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s51 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s50 -; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[4:7] -; GCN-HSA-NEXT: v_mov_b32_e32 v29, s49 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s34 -; GCN-HSA-NEXT: s_add_u32 s34, s0, 32 -; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11] -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s35 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s44 -; GCN-HSA-NEXT: s_addc_u32 s35, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v16, s46 -; GCN-HSA-NEXT: v_mov_b32_e32 v17, s47 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s45 -; GCN-HSA-NEXT: v_mov_b32_e32 v18, s52 -; GCN-HSA-NEXT: v_mov_b32_e32 v19, s43 +; GCN-HSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000 +; GCN-HSA-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000 +; GCN-HSA-NEXT: s_add_u32 s54, s0, 0x50 +; GCN-HSA-NEXT: s_addc_u32 s55, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v24, s54 +; GCN-HSA-NEXT: v_mov_b32_e32 v25, s55 +; GCN-HSA-NEXT: s_add_u32 s54, s0, 64 +; GCN-HSA-NEXT: s_addc_u32 s55, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v26, s54 +; GCN-HSA-NEXT: v_mov_b32_e32 v27, s55 +; GCN-HSA-NEXT: s_add_u32 s54, s0, 16 +; GCN-HSA-NEXT: s_addc_u32 s55, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v20, s40 +; GCN-HSA-NEXT: s_add_u32 s40, s0, 0xd0 +; GCN-HSA-NEXT: v_mov_b32_e32 v21, s41 +; GCN-HSA-NEXT: s_addc_u32 s41, s1, 0 +; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11] +; GCN-HSA-NEXT: v_mov_b32_e32 v28, s54 +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s40 +; GCN-HSA-NEXT: v_mov_b32_e32 v9, s41 +; GCN-HSA-NEXT: s_add_u32 s40, s0, 0xc0 +; GCN-HSA-NEXT: v_mov_b32_e32 v29, s55 +; GCN-HSA-NEXT: s_addc_u32 s41, s1, 0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[28:29], v[0:3] -; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[16:19] -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s20 -; GCN-HSA-NEXT: s_add_u32 s20, s0, 16 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s21 -; GCN-HSA-NEXT: s_addc_u32 s21, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v16, s20 -; GCN-HSA-NEXT: v_mov_b32_e32 v17, s21 -; GCN-HSA-NEXT: s_add_u32 s20, s0, 0xf0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[12:15] -; GCN-HSA-NEXT: s_addc_u32 s21, s1, 0 
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s34 -; GCN-HSA-NEXT: v_mov_b32_e32 v18, s20 -; GCN-HSA-NEXT: v_mov_b32_e32 v22, s40 -; GCN-HSA-NEXT: v_mov_b32_e32 v23, s41 -; GCN-HSA-NEXT: v_mov_b32_e32 v15, s35 -; GCN-HSA-NEXT: v_mov_b32_e32 v6, s30 -; GCN-HSA-NEXT: v_mov_b32_e32 v7, s31 -; GCN-HSA-NEXT: v_mov_b32_e32 v19, s21 -; GCN-HSA-NEXT: s_add_u32 s20, s0, 0xe0 -; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[20:23] -; GCN-HSA-NEXT: s_addc_u32 s21, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v20, s20 -; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[4:7] -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s26 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s27 -; GCN-HSA-NEXT: v_mov_b32_e32 v21, s21 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0 -; GCN-HSA-NEXT: s_add_u32 s20, s0, 0xd0 -; GCN-HSA-NEXT: v_mov_b32_e32 v8, s38 -; GCN-HSA-NEXT: v_mov_b32_e32 v9, s39 -; GCN-HSA-NEXT: v_mov_b32_e32 v10, s54 -; GCN-HSA-NEXT: v_mov_b32_e32 v11, s53 -; GCN-HSA-NEXT: v_mov_b32_e32 v12, s36 -; GCN-HSA-NEXT: v_mov_b32_e32 v13, s37 -; GCN-HSA-NEXT: v_mov_b32_e32 v14, s28 -; GCN-HSA-NEXT: v_mov_b32_e32 v15, s29 -; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11] -; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15] -; GCN-HSA-NEXT: s_addc_u32 s21, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s20 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s24 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s25 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s22 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s23 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s21 -; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GCN-HSA-NEXT: s_nop 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s26 +; GCN-HSA-NEXT: s_add_u32 s26, s0, 0x90 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s27 +; GCN-HSA-NEXT: s_addc_u32 s27, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v24, s26 +; GCN-HSA-NEXT: v_mov_b32_e32 v25, s27 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s1 +; GCN-HSA-NEXT: s_add_u32 s26, s0, 0x80 +; GCN-HSA-NEXT: v_mov_b32_e32 v6, s46 +; GCN-HSA-NEXT: v_mov_b32_e32 v7, s47 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s0 +; GCN-HSA-NEXT: s_addc_u32 s27, s1, 0 +; GCN-HSA-NEXT: flat_store_dwordx4 v[2:3], v[4:7] +; GCN-HSA-NEXT: v_mov_b32_e32 v16, s44 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s24 +; GCN-HSA-NEXT: s_add_u32 s24, s0, 0x70 +; GCN-HSA-NEXT: v_mov_b32_e32 v17, s45 +; GCN-HSA-NEXT: v_mov_b32_e32 v18, s50 +; GCN-HSA-NEXT: v_mov_b32_e32 v19, s51 +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s40 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s25 +; GCN-HSA-NEXT: s_addc_u32 s25, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v22, s52 +; GCN-HSA-NEXT: v_mov_b32_e32 v23, s53 +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s42 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s43 +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s41 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s48 +; GCN-HSA-NEXT: v_mov_b32_e32 v15, s49 +; GCN-HSA-NEXT: v_mov_b32_e32 v26, s26 +; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[16:19] +; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[20:23] +; GCN-HSA-NEXT: v_mov_b32_e32 v10, s18 +; GCN-HSA-NEXT: s_add_u32 s18, s0, 0x60 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s28 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s29 +; GCN-HSA-NEXT: v_mov_b32_e32 v27, s27 +; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[12:15] +; GCN-HSA-NEXT: v_mov_b32_e32 v11, s19 +; GCN-HSA-NEXT: v_mov_b32_e32 v12, s24 +; GCN-HSA-NEXT: s_addc_u32 s19, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v14, s18 +; GCN-HSA-NEXT: v_mov_b32_e32 v8, s22 +; GCN-HSA-NEXT: v_mov_b32_e32 v6, s38 +; GCN-HSA-NEXT: v_mov_b32_e32 v7, s37 +; GCN-HSA-NEXT: v_mov_b32_e32 v9, s23 +; GCN-HSA-NEXT: v_mov_b32_e32 v13, s25 +; GCN-HSA-NEXT: 
v_mov_b32_e32 v15, s19 +; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[0:3] +; GCN-HSA-NEXT: flat_store_dwordx4 v[12:13], v[4:7] +; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[8:11] ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s16 -; GCN-HSA-NEXT: s_add_u32 s16, s0, 0xc0 +; GCN-HSA-NEXT: s_add_u32 s16, s0, 48 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s17 ; GCN-HSA-NEXT: s_addc_u32 s17, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s16 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s18 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s19 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s34 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s33 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s17 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_nop 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s8 +; GCN-HSA-NEXT: s_add_u32 s8, s0, 32 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s9 +; GCN-HSA-NEXT: s_addc_u32 s9, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s10 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s11 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9 +; GCN-HSA-NEXT: s_add_u32 s8, s0, 0xf0 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GCN-HSA-NEXT: s_addc_u32 s9, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s20 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s21 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s36 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s35 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9 +; GCN-HSA-NEXT: s_add_u32 s8, s0, 0xe0 +; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; GCN-HSA-NEXT: s_addc_u32 s9, s1, 0 +; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8 ; GCN-HSA-NEXT: v_mov_b32_e32 v0, s14 -; GCN-HSA-NEXT: s_add_u32 s14, s0, 0xb0 ; GCN-HSA-NEXT: v_mov_b32_e32 v1, s15 -; GCN-HSA-NEXT: s_addc_u32 s15, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s42 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s33 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15 -; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GCN-HSA-NEXT: s_nop 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10 -; GCN-HSA-NEXT: s_add_u32 s10, s0, 0xa0 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11 -; GCN-HSA-NEXT: s_addc_u32 s11, s1, 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v4, s10 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s12 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s13 -; GCN-HSA-NEXT: v_mov_b32_e32 v5, s11 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s12 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s13 +; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_nop 0 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s6 -; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x90 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s7 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s6 +; GCN-HSA-NEXT: s_add_u32 s6, s0, 0xb0 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s7 ; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6 -; GCN-HSA-NEXT: s_add_u32 s0, s0, 0x80 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s8 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s9 +; GCN-HSA-NEXT: s_add_u32 s0, s0, 0xa0 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s31 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s30 ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7 ; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1 -; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2 -; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3 -; GCN-HSA-NEXT: v_mov_b32_e32 v2, s4 -; GCN-HSA-NEXT: v_mov_b32_e32 v3, s5 +; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4 +; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5 +; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2 +; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3 ; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0 ; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GCN-HSA-NEXT: s_endpgm @@ -8584,155 +8584,155 @@ 
define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5 ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(1) -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s9, v3 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s8, v2 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s30, s9, 16 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s11, v1 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s38, s9, 8 -; GCN-NOHSA-VI-NEXT: s_mov_b32 s40, s9 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s46, s8, 8 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s71, s9, 31 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s72, s9, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s10, v0 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s50, s11, 16 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[28:29], s[8:9], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s72 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s71 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s30 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s31 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v2 ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s13, v7 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s12, v6 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s15, v5 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s14, v4 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s52, s11, 8 -; GCN-NOHSA-VI-NEXT: s_mov_b32 s54, s11 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s69, s11, 31 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s70, s11, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s40 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s41 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s38 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s39 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:224 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s28 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s29 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s46 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s47 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s9, v7 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s11, v5 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v3 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s6, v0 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s10, v4 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s38, s4, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s40, s4, 24 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s42, s4, 8 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s60, s9, 8 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s39, s11, 24 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s7, v1 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s8, v6 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s48, s6, 8 ; GCN-NOHSA-VI-NEXT: s_lshr_b32 s56, s10, 16 ; GCN-NOHSA-VI-NEXT: s_lshr_b32 s58, s10, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s36, s10, 8 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s62, s11, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s64, s11, 8 +; GCN-NOHSA-VI-NEXT: s_mov_b32 s66, s11 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[20:21], s[10:11], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[68:69], s[4:5], 0x80000 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s33, s11, 31 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[10:11], s[60:61], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[60:61], s[38:39], 0x80000 
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s50, s8, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s52, s8, 24 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s68 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s69 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s60 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s61 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s40 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s41 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s42 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s43 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s54, s8, 8 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:208 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s60, s10, 8 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[28:29], s[8:9], 0x80000 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s34 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s35 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s48 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s49 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[58:59], s[58:59], 0x80000 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s50 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s51 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s70 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s69 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[58:59], s[58:59], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s48, s13, 16 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s54 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s55 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s52 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s53 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[18:19], s[10:11], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[60:61], s[60:61], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:160 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s34, s13, 8 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s30, s5, 16 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s28 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s29 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s54 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s55 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s24, s5, 8 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s56 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s57 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s58 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s59 -; GCN-NOHSA-VI-NEXT: s_mov_b32 s36, s13 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s65, s13, 31 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s67, s13, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s26, s12, 16 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s18 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s19 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s60 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s61 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s24, s12, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000 -; 
GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s22, s12, 8 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s48 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s49 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s67 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s65 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 +; GCN-NOHSA-VI-NEXT: s_mov_b32 s26, s5 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s71, s5, 31 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s72, s5, 24 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s22, s7, 16 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s20 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s21 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s36 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s37 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s20, s15, 16 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s36 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s37 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s34 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s35 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[12:13], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s7, 8 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s30 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s31 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s72 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s71 +; GCN-NOHSA-VI-NEXT: s_mov_b32 s18, s7 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s67, s7, 31 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s70, s7, 24 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s62, s15, 8 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s9, 16 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s26 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s27 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s24 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s25 -; GCN-NOHSA-VI-NEXT: s_mov_b32 s16, s15 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s33, s15, 31 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s63, s15, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s64, s14, 16 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s6 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s7 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s22 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s23 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s66, s14, 24 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s68, s14, 8 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[62:63], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s42, s8, 16 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s20 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s21 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s63 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s33 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s44, s8, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[10:11], s[66:67], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[12:13], s[64:65], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 
offset:48 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[68:69], 0x80000 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s16 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s17 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s14 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s15 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s42 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224 +; GCN-NOHSA-VI-NEXT: s_mov_b32 s12, s9 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s22 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s23 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s70 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s67 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s63, s9, 31 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s65, s9, 24 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s18 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s19 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s16 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s17 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:160 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s44, s6, 16 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s14 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s15 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s65 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s63 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s46, s6, 24 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[62:63], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[66:67], 0x80000 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s12 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s13 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s10 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s11 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s43 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s44 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s45 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:208 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s5 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s8 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s9 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[64:65], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s44 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s8 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s9 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s39 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s33 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s45 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s46 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s47 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:144 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s6 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s7 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s4 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s5 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 ; GCN-NOHSA-VI-NEXT: s_endpgm ; ; EG-LABEL: 
global_sextload_v32i8_to_v32i64: @@ -9887,46 +9887,97 @@ define amdgpu_kernel void @global_zextload_v4i8_to_v4i16(ptr addrspace(1) %out, ; ; EG-LABEL: global_zextload_v4i8_to_v4i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 6, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T4.XY, T5.X, 1 +; EG-NEXT: ALU 31, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XY, T7.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_32 T4.X, T4.X, 0, #1 +; EG-NEXT: VTX_READ_32 T7.X, T7.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T4.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: MOV * T7.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: AND_INT T0.W, T7.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 255(3.573311e-43), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T0.W, T7.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, ; EG-NEXT: MOV * T0.W, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT * T4.Y, T4.X, literal.x, PV.W, +; EG-NEXT: BFE_UINT T0.W, T7.X, literal.x, PV.W, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T7.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: AND_INT T4.X, T4.X, literal.x, -; EG-NEXT: LSHR * T5.X, KC0[2].Y, literal.y, -; EG-NEXT: 255(3.573311e-43), 2(2.802597e-45) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: LSHR T7.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T8.Y, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.Y, +; EG-NEXT: MOV * T8.X, T4.X, ; ; CM-LABEL: global_zextload_v4i8_to_v4i16: ; CM: ; %bb.0: -; CM-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; CM-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; CM-NEXT: TEX 0 @6 -; CM-NEXT: ALU 7, @9, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T4, T5.X +; CM-NEXT: ALU 31, @10, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T8, T7.X ; CM-NEXT: CF_END ; CM-NEXT: PAD ; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_32 T4.X, T4.X, 0, #1 +; CM-NEXT: VTX_READ_32 T7.X, T7.X, 0, #1 ; CM-NEXT: ALU clause starting at 8: -; CM-NEXT: MOV * T4.X, KC0[2].Z, -; CM-NEXT: ALU clause starting at 9: +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: MOV * T7.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 10: +; CM-NEXT: AND_INT T0.Z, T7.X, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 255(3.573311e-43), -65536(nan) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV * T4.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T0.W, T7.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T0.W, PV.Z, 
PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV T0.Y, T5.X, ; CM-NEXT: MOV * T0.W, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_UINT * T4.Y, T4.X, literal.x, PV.W, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T0.W, T7.X, literal.y, PV.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T7.X, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: AND_INT * T4.X, T4.X, literal.x, -; CM-NEXT: 255(3.573311e-43), 0(0.000000e+00) -; CM-NEXT: LSHR * T5.X, KC0[2].Y, literal.x, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: LSHR T7.X, KC0[2].Y, literal.x, +; CM-NEXT: OR_INT * T8.Y, PV.Z, PV.W, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: MOV * T5.X, PV.Y, +; CM-NEXT: MOV * T8.X, T4.X, %load = load <4 x i8>, ptr addrspace(1) %in %ext = zext <4 x i8> %load to <4 x i16> store <4 x i16> %ext, ptr addrspace(1) %out @@ -10017,43 +10068,109 @@ define amdgpu_kernel void @global_sextload_v4i8_to_v4i16(ptr addrspace(1) %out, ; ; EG-LABEL: global_sextload_v4i8_to_v4i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 5, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T5.XY, T4.X, 1 +; EG-NEXT: ALU 37, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XY, T7.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_32 T4.X, T4.X, 0, #1 +; EG-NEXT: VTX_READ_32 T7.X, T7.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T4.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: -; EG-NEXT: BFE_INT T5.X, T4.X, 0.0, literal.x, -; EG-NEXT: LSHR T0.W, T4.X, literal.x, -; EG-NEXT: LSHR * T4.X, KC0[2].Y, literal.y, -; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45) -; EG-NEXT: BFE_INT * T5.Y, PV.W, 0.0, literal.x, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: MOV * T7.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: BFE_INT * T0.W, T7.X, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T7.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: LSHR * T0.W, T7.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T7.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 
16(2.242078e-44) +; EG-NEXT: LSHR T7.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T8.Y, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.Y, +; EG-NEXT: MOV * T8.X, T4.X, ; ; CM-LABEL: global_sextload_v4i8_to_v4i16: ; CM: ; %bb.0: -; CM-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; CM-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; CM-NEXT: TEX 0 @6 -; CM-NEXT: ALU 5, @9, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T5, T4.X +; CM-NEXT: ALU 37, @10, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T8, T7.X ; CM-NEXT: CF_END ; CM-NEXT: PAD ; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_32 T4.X, T4.X, 0, #1 +; CM-NEXT: VTX_READ_32 T7.X, T7.X, 0, #1 ; CM-NEXT: ALU clause starting at 8: -; CM-NEXT: MOV * T4.X, KC0[2].Z, -; CM-NEXT: ALU clause starting at 9: -; CM-NEXT: BFE_INT T5.X, T4.X, 0.0, literal.x, -; CM-NEXT: LSHR * T0.W, T4.X, literal.x, +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: MOV * T7.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 10: +; CM-NEXT: BFE_INT * T0.W, T7.X, 0.0, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T4.X, KC0[2].Y, literal.x, -; CM-NEXT: BFE_INT * T5.Y, PV.W, 0.0, literal.y, -; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; CM-NEXT: AND_INT T0.Z, PV.W, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 65535(9.183409e-41), -65536(nan) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV * T4.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T7.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV T0.Y, T5.X, +; CM-NEXT: LSHR * T0.W, T7.X, literal.x, BS:VEC_120/SCL_212 +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T7.X, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: LSHR T7.X, KC0[2].Y, literal.x, +; CM-NEXT: OR_INT * T8.Y, PV.Z, PV.W, +; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: MOV * T5.X, PV.Y, +; CM-NEXT: MOV * T8.X, T4.X, %load = load <4 x i8>, ptr addrspace(1) %in %ext = sext <4 x i8> %load to <4 x i16> store <4 x i16> %ext, ptr addrspace(1) %out @@ -10158,52 +10275,156 @@ define amdgpu_kernel void @global_zextload_v8i8_to_v8i16(ptr addrspace(1) %out, ; ; EG-LABEL: global_zextload_v8i8_to_v8i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 9, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XYZW, T5.X, 1 +; EG-NEXT: ALU 61, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T12.XYZW, T11.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_64 T5.XY, T5.X, 0, #1 +; EG-NEXT: VTX_READ_64 T11.XY, T11.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: 
MOV * T5.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: MOV * T11.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: AND_INT T0.W, T11.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 255(3.573311e-43), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T0.W, T11.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, ; EG-NEXT: MOV * T0.W, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT * T6.W, T5.Y, literal.x, PV.W, +; EG-NEXT: BFE_UINT T1.W, T11.X, literal.x, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), -65536(nan) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T11.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT T6.Y, T5.X, literal.x, T0.W, -; EG-NEXT: AND_INT * T6.Z, T5.Y, literal.y, -; EG-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; EG-NEXT: AND_INT T6.X, T5.X, literal.x, -; EG-NEXT: LSHR * T5.X, KC0[2].Y, literal.y, -; EG-NEXT: 255(3.573311e-43), 2(2.802597e-45) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T12.Y, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T11.Y, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T11.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: BFE_UINT * T0.W, T11.Y, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PV.W, T0.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T11.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: LSHR T11.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T12.W, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T12.X, T8.X, +; EG-NEXT: MOV * T12.Z, T4.X, ; ; CM-LABEL: global_zextload_v8i8_to_v8i16: ; CM: ; %bb.0: -; CM-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; CM-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; CM-NEXT: TEX 0 @6 -; CM-NEXT: ALU 10, @9, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T6, T5.X +; CM-NEXT: ALU 60, @10, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T12, T11.X ; CM-NEXT: CF_END ; CM-NEXT: PAD ; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_64 T5.XY, T5.X, 0, #1 +; CM-NEXT: VTX_READ_64 T11.XY, T11.X, 0, #1 ; CM-NEXT: ALU clause starting at 8: -; CM-NEXT: MOV * T5.X, 
KC0[2].Z, -; CM-NEXT: ALU clause starting at 9: +; CM-NEXT: MOV * T0.Y, T8.X, +; CM-NEXT: MOV * T11.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 10: +; CM-NEXT: AND_INT T0.Z, T11.X, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 255(3.573311e-43), -65536(nan) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV * T8.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T0.W, T11.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T8.X, PV.W, +; CM-NEXT: MOV T0.Y, T9.X, ; CM-NEXT: MOV * T0.W, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_UINT * T6.W, T5.Y, literal.x, PV.W, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T11.X, literal.y, PV.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T9.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T11.X, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_UINT T6.Y, T5.X, literal.x, T0.W, -; CM-NEXT: AND_INT * T6.Z, T5.Y, literal.y, -; CM-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; CM-NEXT: AND_INT * T6.X, T5.X, literal.x, -; CM-NEXT: 255(3.573311e-43), 0(0.000000e+00) -; CM-NEXT: LSHR * T5.X, KC0[2].Y, literal.x, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T12.Y, PV.Z, PV.W, +; CM-NEXT: MOV T9.X, PV.Y, +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T11.Y, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T4.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T11.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV * T0.Y, T5.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T0.W, T11.Y, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T11.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: LSHR T11.X, KC0[2].Y, literal.x, +; CM-NEXT: OR_INT * T12.W, PV.Z, PV.W, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T12.X, T8.X, +; CM-NEXT: MOV * T12.Z, T4.X, BS:VEC_120/SCL_212 %load = load <8 x i8>, ptr addrspace(1) %in %ext = zext <8 x i8> %load to <8 x i16> store <8 x i16> %ext, ptr addrspace(1) %out @@ -10309,33 +10530,34 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i16(ptr addrspace(1) %out, ; GCN-NOHSA-VI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, 0xffff ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) ; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v0 ; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v1 ; GCN-NOHSA-VI-NEXT: s_lshr_b32 s6, s4, 16 ; 
GCN-NOHSA-VI-NEXT: s_lshr_b32 s7, s5, 16 ; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s8, s5 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s11, s4 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s9, s5, 0x80000 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s10, s4 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s10, s4, 0x80000 ; GCN-NOHSA-VI-NEXT: s_ashr_i32 s5, s5, 24 ; GCN-NOHSA-VI-NEXT: s_ashr_i32 s4, s4, 24 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s8, s8, 8 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s11, s11, 8 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s7, s7, 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s6, s6, 0x80000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s9, 0xffff, s9 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s10, s10, 8 +; GCN-NOHSA-VI-NEXT: s_and_b32 s10, 0xffff, s10 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s5, s5, 16 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s4, s4, 16 ; GCN-NOHSA-VI-NEXT: s_and_b32 s8, s8, 0xffff0000 +; GCN-NOHSA-VI-NEXT: s_and_b32 s11, s11, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s7, 0xffff, s7 ; GCN-NOHSA-VI-NEXT: s_and_b32 s6, 0xffff, s6 -; GCN-NOHSA-VI-NEXT: v_and_b32_sdwa v0, v2, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GCN-NOHSA-VI-NEXT: s_and_b32 s10, s10, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_or_b32 s8, s9, s8 +; GCN-NOHSA-VI-NEXT: s_or_b32 s9, s10, s11 ; GCN-NOHSA-VI-NEXT: s_or_b32 s5, s7, s5 ; GCN-NOHSA-VI-NEXT: s_or_b32 s4, s6, s4 -; GCN-NOHSA-VI-NEXT: v_or_b32_e32 v0, s10, v0 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s9 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s4 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s8 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s5 @@ -10344,53 +10566,183 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i16(ptr addrspace(1) %out, ; ; EG-LABEL: global_sextload_v8i8_to_v8i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 10, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XYZW, T5.X, 1 +; EG-NEXT: ALU 74, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T12.XYZW, T11.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_64 T5.XY, T5.X, 0, #1 +; EG-NEXT: VTX_READ_64 T11.XY, T11.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T5.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: -; EG-NEXT: BFE_INT * T6.Z, T5.Y, 0.0, literal.x, +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: MOV * T11.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: BFE_INT * T0.W, T11.X, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T6.X, T5.X, 0.0, literal.x, -; EG-NEXT: LSHR * T0.W, T5.Y, literal.x, +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T11.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T6.W, PV.W, 0.0, literal.x, -; EG-NEXT: LSHR * T0.W, T5.X, literal.x, +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: LSHR * T0.W, T11.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 
8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T11.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T12.Y, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: BFE_INT * T0.W, T11.Y, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T5.X, KC0[2].Y, literal.x, -; EG-NEXT: BFE_INT * T6.Y, PS, 0.0, literal.y, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T11.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: LSHR * T0.W, T11.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T11.Y, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: LSHR T11.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T12.W, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T12.X, T8.X, +; EG-NEXT: MOV * T12.Z, T4.X, ; ; CM-LABEL: global_sextload_v8i8_to_v8i16: ; CM: ; %bb.0: -; CM-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; CM-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; CM-NEXT: TEX 0 @6 -; CM-NEXT: ALU 10, @9, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T6, T5.X +; CM-NEXT: ALU 74, @10, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T12, T11.X ; CM-NEXT: CF_END ; CM-NEXT: PAD ; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_64 T5.XY, T5.X, 0, #1 +; CM-NEXT: VTX_READ_64 T11.XY, T11.X, 0, #1 ; CM-NEXT: ALU clause starting at 8: -; CM-NEXT: MOV * T5.X, KC0[2].Z, -; CM-NEXT: ALU clause starting at 9: -; CM-NEXT: BFE_INT * T6.Z, T5.Y, 0.0, literal.x, +; CM-NEXT: MOV * T0.Y, T8.X, +; CM-NEXT: MOV * T11.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 10: +; CM-NEXT: BFE_INT * T0.W, T11.X, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.W, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 65535(9.183409e-41), -65536(nan) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV * T8.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T11.X, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_INT T6.X, T5.X, 0.0, literal.x, -; CM-NEXT: LSHR * T0.W, T5.Y, 
literal.x, +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T0.Z, T5.X, literal.x, -; CM-NEXT: BFE_INT * T6.W, PV.W, 0.0, literal.x, +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T8.X, PV.W, +; CM-NEXT: MOV T0.Y, T9.X, +; CM-NEXT: LSHR * T0.W, T11.X, literal.x, BS:VEC_120/SCL_212 +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T5.X, KC0[2].Y, literal.x, -; CM-NEXT: BFE_INT * T6.Y, PV.Z, 0.0, literal.y, -; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T9.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T11.X, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T12.Y, PV.Z, PV.W, +; CM-NEXT: MOV T9.X, PV.Y, +; CM-NEXT: MOV T0.Y, T4.X, +; CM-NEXT: BFE_INT * T0.W, T11.Y, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T4.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T11.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV T0.Y, T5.X, +; CM-NEXT: LSHR * T0.W, T11.Y, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T11.Y, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: LSHR T11.X, KC0[2].Y, literal.x, +; CM-NEXT: OR_INT * T12.W, PV.Z, PV.W, +; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T12.X, T8.X, +; CM-NEXT: MOV * T12.Z, T4.X, BS:VEC_120/SCL_212 %load = load <8 x i8>, ptr addrspace(1) %in %ext = sext <8 x i8> %load to <8 x i16> store <8 x i16> %ext, ptr addrspace(1) %out @@ -10547,71 +10899,287 @@ define amdgpu_kernel void @global_zextload_v16i8_to_v16i16(ptr addrspace(1) %out ; ; EG-LABEL: global_zextload_v16i8_to_v16i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T9.XYZW, T10.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XYZW, T7.X, 1 +; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 0 @8 +; EG-NEXT: ALU 103, @12, KC0[], 
KC1[] +; EG-NEXT: ALU 20, @116, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T20.XYZW, T22.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T19.XYZW, T21.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_128 T7.XYZW, T7.X, 0, #1 -; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T7.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: +; EG-NEXT: PAD +; EG-NEXT: Fetch clause starting at 8: +; EG-NEXT: VTX_READ_128 T19.XYZW, T19.X, 0, #1 +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: MOV * T0.Y, T16.X, +; EG-NEXT: MOV * T19.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: AND_INT T0.W, T19.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 255(3.573311e-43), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T16.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T0.W, T19.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV T0.Y, T17.X, ; EG-NEXT: MOV * T0.W, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT * T8.W, T7.Y, literal.x, PV.W, +; EG-NEXT: BFE_UINT T1.W, T19.X, literal.x, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), -65536(nan) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T19.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT T8.Y, T7.X, literal.x, T0.W, -; EG-NEXT: AND_INT T8.Z, T7.Y, literal.y, -; EG-NEXT: BFE_UINT * T9.W, T7.W, literal.x, T0.W, -; EG-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; EG-NEXT: AND_INT T8.X, T7.X, literal.x, -; EG-NEXT: BFE_UINT T9.Y, T7.Z, literal.y, T0.W, -; EG-NEXT: LSHR * T7.X, KC0[2].Y, literal.z, -; EG-NEXT: 255(3.573311e-43), 8(1.121039e-44) -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) -; EG-NEXT: AND_INT * T9.Z, T7.W, literal.x, -; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00) -; EG-NEXT: AND_INT T9.X, T7.Z, literal.x, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, -; EG-NEXT: 255(3.573311e-43), 16(2.242078e-44) -; EG-NEXT: LSHR * T10.X, PV.W, literal.x, +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T20.Y, PV.W, PS, +; EG-NEXT: MOV T17.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T12.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T19.Y, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T12.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T19.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV T0.Y, T13.X, +; EG-NEXT: BFE_UINT * T1.W, T19.Y, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T19.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; 
EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T20.W, PV.W, PS, +; EG-NEXT: MOV T13.X, PV.W, +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T19.Z, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T19.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: BFE_UINT * T1.W, T19.Z, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T19.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T19.Y, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T19.W, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T19.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: BFE_UINT * T0.W, T19.W, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: ALU clause starting at 116: +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PV.W, T0.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR T0.W, T19.W, literal.x, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 16(2.242078e-44) +; EG-NEXT: LSHR T21.X, PS, literal.x, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.y, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.z, +; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; EG-NEXT: 16711680(2.341805e-38), 0(0.000000e+00) +; EG-NEXT: LSHR T22.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T19.W, PV.W, PS, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T20.X, T16.X, +; EG-NEXT: MOV * T20.Z, T12.X, +; EG-NEXT: MOV T19.X, T8.X, +; EG-NEXT: MOV * T19.Z, T4.X, BS:VEC_120/SCL_212 ; ; CM-LABEL: global_zextload_v16i8_to_v16i16: ; CM: ; %bb.0: -; CM-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] -; CM-NEXT: TEX 0 @6 -; CM-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T9, T7.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T8, T10.X +; CM-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[] +; CM-NEXT: TEX 0 @8 +; CM-NEXT: ALU 101, @12, KC0[], KC1[] +; CM-NEXT: ALU 20, @114, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T19, T22.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T20, T21.X ; CM-NEXT: CF_END -; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_128 T7.XYZW, T7.X, 0, #1 -; CM-NEXT: ALU clause 
starting at 8: -; CM-NEXT: MOV * T7.X, KC0[2].Z, -; CM-NEXT: ALU clause starting at 9: +; CM-NEXT: PAD +; CM-NEXT: Fetch clause starting at 8: +; CM-NEXT: VTX_READ_128 T19.XYZW, T19.X, 0, #1 +; CM-NEXT: ALU clause starting at 10: +; CM-NEXT: MOV * T0.Y, T16.X, +; CM-NEXT: MOV * T19.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 12: +; CM-NEXT: AND_INT T0.Z, T19.X, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 255(3.573311e-43), -65536(nan) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV * T16.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T0.W, T19.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T16.X, PV.W, +; CM-NEXT: MOV T0.Y, T17.X, ; CM-NEXT: MOV * T0.W, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_UINT * T8.W, T7.W, literal.x, PV.W, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T19.X, literal.y, PV.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T17.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T19.X, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_UINT T8.Y, T7.Z, literal.x, T0.W, -; CM-NEXT: AND_INT T8.Z, T7.W, literal.y, -; CM-NEXT: BFE_UINT * T9.W, T7.Y, literal.x, T0.W, -; CM-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; CM-NEXT: AND_INT T8.X, T7.Z, literal.x, -; CM-NEXT: BFE_UINT T9.Y, T7.X, literal.y, T0.W, -; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, -; CM-NEXT: 255(3.573311e-43), 8(1.121039e-44) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T20.Y, PV.Z, PV.W, +; CM-NEXT: MOV T17.X, PV.Y, +; CM-NEXT: MOV * T0.Y, T12.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T19.Y, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T12.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T19.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T12.X, PV.W, +; CM-NEXT: MOV * T0.Y, T13.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T19.Y, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T13.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T19.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T20.W, PV.Z, PV.W, +; CM-NEXT: MOV T13.X, PV.W, +; CM-NEXT: MOV * T0.Y, T8.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T19.Z, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T8.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T19.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 
16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T8.X, PV.W, +; CM-NEXT: MOV * T0.Y, T9.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T19.Z, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T9.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T19.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T19.Y, PV.Z, PV.W, +; CM-NEXT: MOV T9.X, PV.Y, +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T19.W, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T4.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T19.W, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV * T0.Y, T5.X, +; CM-NEXT: AND_INT * T0.Z, PV.Y, literal.x, +; CM-NEXT: -65536(nan), 0(0.000000e+00) +; CM-NEXT: ALU clause starting at 114: +; CM-NEXT: BFE_UINT * T0.W, T19.W, literal.x, T0.W, ; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T10.X, PV.W, literal.x, -; CM-NEXT: AND_INT * T9.Z, T7.Y, literal.y, -; CM-NEXT: 2(2.802597e-45), 255(3.573311e-43) -; CM-NEXT: AND_INT * T9.X, T7.X, literal.x, -; CM-NEXT: 255(3.573311e-43), 0(0.000000e+00) -; CM-NEXT: LSHR * T7.X, KC0[2].Y, literal.x, +; CM-NEXT: OR_INT * T0.W, T0.Z, PV.W, +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T19.W, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: LSHR T21.X, KC0[2].Y, literal.x, +; CM-NEXT: AND_INT T0.Y, PV.Y, literal.y, +; CM-NEXT: AND_INT T0.Z, PV.W, literal.z, +; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.w, +; CM-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; CM-NEXT: 16711680(2.341805e-38), 16(2.242078e-44) +; CM-NEXT: LSHR T22.X, PV.W, literal.x, +; CM-NEXT: OR_INT * T19.W, PV.Y, PV.Z, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T20.X, T16.X, +; CM-NEXT: MOV * T20.Z, T12.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV T19.X, T8.X, +; CM-NEXT: MOV * T19.Z, T4.X, BS:VEC_120/SCL_212 %load = load <16 x i8>, ptr addrspace(1) %in %ext = zext <16 x i8> %load to <16 x i16> store <16 x i16> %ext, ptr addrspace(1) %out @@ -10777,27 +11345,27 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5 ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v2 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v3 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s6, v0 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s7, v1 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s8, s4, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s9, s5, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s6, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s11, s7, 16 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s12, s7 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 s13, s7, 0x80000 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s7, s7, 16 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s16, s5 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s18, s4 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s14, s6 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 
s15, s6, 0x80000 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s6, s6, 24 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 s17, s5, 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 s19, s4, 0x80000 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s5, s5, 24 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v0 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v1 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s6, v2 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s7, v3 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s8, s6, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s9, s7, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s4, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s11, s5, 16 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s12, s5 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s13, s5, 0x80000 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s5, s5, 16 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s16, s7 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s18, s6 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s14, s4 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s15, s4, 0x80000 ; GCN-NOHSA-VI-NEXT: s_ashr_i32 s4, s4, 24 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s7, s7, 8 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s17, s7, 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s19, s6, 0x80000 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s7, s7, 24 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s6, s6, 24 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s5, s5, 8 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s11, s11, 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s10, s10, 0x80000 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s16, s16, 8 @@ -10806,12 +11374,12 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s8, s8, 0x80000 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s12, s12, 8 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s14, s14, 8 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s6, s6, 16 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s4, s4, 16 ; GCN-NOHSA-VI-NEXT: s_and_b32 s17, 0xffff, s17 ; GCN-NOHSA-VI-NEXT: s_and_b32 s19, 0xffff, s19 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s5, s5, 16 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s4, s4, 16 -; GCN-NOHSA-VI-NEXT: s_and_b32 s7, s7, 0xffff0000 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s7, s7, 16 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s6, s6, 16 +; GCN-NOHSA-VI-NEXT: s_and_b32 s5, s5, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s11, 0xffff, s11 ; GCN-NOHSA-VI-NEXT: s_and_b32 s10, 0xffff, s10 ; GCN-NOHSA-VI-NEXT: s_and_b32 s16, s16, 0xffff0000 @@ -10822,94 +11390,365 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_and_b32 s15, 0xffff, s15 ; GCN-NOHSA-VI-NEXT: s_and_b32 s12, s12, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s14, s14, 0xffff0000 -; GCN-NOHSA-VI-NEXT: s_or_b32 s7, s11, s7 -; GCN-NOHSA-VI-NEXT: s_or_b32 s6, s10, s6 +; GCN-NOHSA-VI-NEXT: s_or_b32 s5, s11, s5 +; GCN-NOHSA-VI-NEXT: s_or_b32 s4, s10, s4 ; GCN-NOHSA-VI-NEXT: s_or_b32 s10, s17, s16 ; GCN-NOHSA-VI-NEXT: s_or_b32 s11, s19, s18 -; GCN-NOHSA-VI-NEXT: s_or_b32 s5, s9, s5 -; GCN-NOHSA-VI-NEXT: s_or_b32 s4, s8, s4 +; GCN-NOHSA-VI-NEXT: s_or_b32 s7, s9, s7 +; GCN-NOHSA-VI-NEXT: s_or_b32 s6, s8, s6 ; GCN-NOHSA-VI-NEXT: s_or_b32 s12, s13, s12 ; GCN-NOHSA-VI-NEXT: s_or_b32 s13, s15, s14 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s11 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s4 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s6 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s10 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s5 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s7 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s13 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s6 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s4 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s12 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s7 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s5 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, 
s[0:3], 0 offset:16 ; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 ; GCN-NOHSA-VI-NEXT: s_endpgm ; ; EG-LABEL: global_sextload_v16i8_to_v16i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 20, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T9.XYZW, T10.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T8.XYZW, T7.X, 1 +; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 0 @8 +; EG-NEXT: ALU 104, @12, KC0[], KC1[] +; EG-NEXT: ALU 46, @117, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T20.XYZW, T22.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T19.XYZW, T21.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_128 T7.XYZW, T7.X, 0, #1 -; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T7.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: -; EG-NEXT: BFE_INT * T8.Z, T7.Y, 0.0, literal.x, +; EG-NEXT: PAD +; EG-NEXT: Fetch clause starting at 8: +; EG-NEXT: VTX_READ_128 T19.XYZW, T19.X, 0, #1 +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: MOV * T0.Y, T16.X, +; EG-NEXT: MOV * T19.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: BFE_INT * T0.W, T19.X, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T8.X, T7.X, 0.0, literal.x, -; EG-NEXT: BFE_INT T9.Z, T7.W, 0.0, literal.x, -; EG-NEXT: LSHR * T0.W, T7.Y, literal.x, +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T16.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T19.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T9.X, T7.Z, 0.0, literal.x, -; EG-NEXT: LSHR T0.Z, T7.W, literal.x, -; EG-NEXT: BFE_INT T8.W, PV.W, 0.0, literal.x, -; EG-NEXT: LSHR * T0.W, T7.X, literal.x, +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV T0.Y, T17.X, +; EG-NEXT: LSHR * T0.W, T19.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T19.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T20.Y, PV.W, PS, +; EG-NEXT: MOV T17.X, PV.Y, +; EG-NEXT: MOV T0.Y, T12.X, +; EG-NEXT: BFE_INT * T0.W, T19.Y, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T7.X, KC0[2].Y, literal.x, -; EG-NEXT: BFE_INT T8.Y, PS, 0.0, literal.y, -; EG-NEXT: LSHR T1.Z, T7.Z, literal.y, -; EG-NEXT: BFE_INT T9.W, PV.Z, 0.0, literal.y, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T12.X, 
PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T19.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T10.X, PS, literal.x, -; EG-NEXT: BFE_INT * T9.Y, PV.Z, 0.0, literal.y, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV T0.Y, T13.X, +; EG-NEXT: LSHR * T0.W, T19.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T19.Y, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T20.W, PV.W, PS, +; EG-NEXT: MOV T13.X, PV.W, +; EG-NEXT: MOV T0.Y, T8.X, +; EG-NEXT: BFE_INT * T0.W, T19.Z, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T19.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: LSHR * T0.W, T19.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T19.Z, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: ALU clause starting at 117: +; EG-NEXT: OR_INT * T19.Y, T1.W, T0.W, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: BFE_INT * T0.W, T19.W, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T19.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV 
T0.Y, T5.X, +; EG-NEXT: LSHR * T0.W, T19.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR T0.W, T19.W, literal.x, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.y, +; EG-NEXT: 24(3.363116e-44), 16(2.242078e-44) +; EG-NEXT: LSHR T21.X, PS, literal.x, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.y, +; EG-NEXT: LSHL * T0.W, PV.W, literal.z, +; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: LSHR T22.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T19.W, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T20.X, T16.X, +; EG-NEXT: MOV * T20.Z, T12.X, +; EG-NEXT: MOV T19.X, T8.X, +; EG-NEXT: MOV * T19.Z, T4.X, BS:VEC_120/SCL_212 ; ; CM-LABEL: global_sextload_v16i8_to_v16i16: ; CM: ; %bb.0: -; CM-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] -; CM-NEXT: TEX 0 @6 -; CM-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T9, T7.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T8, T10.X +; CM-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[] +; CM-NEXT: TEX 0 @8 +; CM-NEXT: ALU 104, @12, KC0[], KC1[] +; CM-NEXT: ALU 46, @117, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T19, T22.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T20, T21.X ; CM-NEXT: CF_END -; CM-NEXT: Fetch clause starting at 6: -; CM-NEXT: VTX_READ_128 T7.XYZW, T7.X, 0, #1 -; CM-NEXT: ALU clause starting at 8: -; CM-NEXT: MOV * T7.X, KC0[2].Z, -; CM-NEXT: ALU clause starting at 9: -; CM-NEXT: BFE_INT * T8.Z, T7.W, 0.0, literal.x, +; CM-NEXT: PAD +; CM-NEXT: Fetch clause starting at 8: +; CM-NEXT: VTX_READ_128 T19.XYZW, T19.X, 0, #1 +; CM-NEXT: ALU clause starting at 10: +; CM-NEXT: MOV * T0.Y, T16.X, +; CM-NEXT: MOV * T19.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 12: +; CM-NEXT: BFE_INT * T0.W, T19.X, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.W, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 65535(9.183409e-41), -65536(nan) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV * T16.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T19.X, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_INT T8.X, T7.Z, 0.0, literal.x, -; CM-NEXT: LSHR T0.Y, T7.Y, literal.x, -; CM-NEXT: BFE_INT T9.Z, T7.Y, 0.0, literal.x, -; CM-NEXT: LSHR * T0.W, T7.W, literal.x, +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_INT T9.X, T7.X, 0.0, literal.x, -; CM-NEXT: LSHR T1.Y, T7.Z, literal.x, -; CM-NEXT: ADD_INT T0.Z, KC0[2].Y, literal.y, -; CM-NEXT: BFE_INT * T8.W, PV.W, 0.0, literal.x, -; CM-NEXT: 8(1.121039e-44), 16(2.242078e-44) -; CM-NEXT: LSHR T10.X, PV.Z, literal.x, -; CM-NEXT: BFE_INT T8.Y, PV.Y, 0.0, literal.y, -; CM-NEXT: LSHR T0.Z, T7.X, literal.y, -; CM-NEXT: BFE_INT * T9.W, T0.Y, 0.0, literal.y, -; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) -; CM-NEXT: LSHR T7.X, KC0[2].Y, literal.x, -; CM-NEXT: BFE_INT * T9.Y, PV.Z, 0.0, literal.y, -; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: 
OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T16.X, PV.W, +; CM-NEXT: MOV T0.Y, T17.X, +; CM-NEXT: LSHR * T0.W, T19.X, literal.x, BS:VEC_120/SCL_212 +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T17.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T19.X, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T20.Y, PV.Z, PV.W, +; CM-NEXT: MOV T17.X, PV.Y, +; CM-NEXT: MOV T0.Y, T12.X, +; CM-NEXT: BFE_INT * T0.W, T19.Y, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T12.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T19.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T12.X, PV.W, +; CM-NEXT: MOV T0.Y, T13.X, +; CM-NEXT: LSHR * T0.W, T19.Y, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T13.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T19.Y, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T20.W, PV.Z, PV.W, +; CM-NEXT: MOV T13.X, PV.W, +; CM-NEXT: MOV T0.Y, T8.X, +; CM-NEXT: BFE_INT * T0.W, T19.Z, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T8.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T19.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T8.X, PV.W, +; CM-NEXT: MOV T0.Y, T9.X, +; CM-NEXT: LSHR * T0.W, T19.Z, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T9.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T19.Z, literal.x, +; CM-NEXT: 24(3.363116e-44), 
0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: ALU clause starting at 117: +; CM-NEXT: OR_INT * T19.Y, T0.Z, T0.W, +; CM-NEXT: MOV T9.X, PV.Y, +; CM-NEXT: MOV T0.Y, T4.X, +; CM-NEXT: BFE_INT * T0.W, T19.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T4.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T19.W, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV T0.Y, T5.X, +; CM-NEXT: LSHR * T0.W, T19.W, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T19.W, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: LSHR T21.X, KC0[2].Y, literal.x, +; CM-NEXT: AND_INT T0.Y, PV.Y, literal.y, +; CM-NEXT: LSHL T0.Z, PV.W, literal.z, +; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, +; CM-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: LSHR T22.X, PV.W, literal.x, +; CM-NEXT: OR_INT * T19.W, PV.Y, PV.Z, +; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T20.X, T16.X, +; CM-NEXT: MOV * T20.Z, T12.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV T19.X, T8.X, +; CM-NEXT: MOV * T19.Z, T4.X, BS:VEC_120/SCL_212 %load = load <16 x i8>, ptr addrspace(1) %in %ext = sext <16 x i8> %load to <16 x i16> store <16 x i16> %ext, ptr addrspace(1) %out @@ -11181,115 +12020,543 @@ define amdgpu_kernel void @global_zextload_v32i8_to_v32i16(ptr addrspace(1) %out ; ; EG-LABEL: global_zextload_v32i8_to_v32i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 1 @8 -; EG-NEXT: ALU 37, @13, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T17.XYZW, T18.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T15.XYZW, T12.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T14.XYZW, T16.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T13.XYZW, T11.X, 1 +; EG-NEXT: ALU 1, @14, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 1 @10 +; EG-NEXT: ALU 103, @16, KC0[], KC1[] +; EG-NEXT: ALU 104, @120, KC0[], KC1[] +; EG-NEXT: ALU 41, @225, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T36.XYZW, T42.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T37.XYZW, T41.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T38.XYZW, T40.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T35.XYZW, T39.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 8: -; EG-NEXT: VTX_READ_128 T12.XYZW, T11.X, 16, #1 -; EG-NEXT: VTX_READ_128 T11.XYZW, T11.X, 0, #1 -; EG-NEXT: ALU clause starting at 12: -; EG-NEXT: MOV * T11.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 13: +; EG-NEXT: Fetch clause starting at 10: +; EG-NEXT: VTX_READ_128 T37.XYZW, T35.X, 16, #1 
+; EG-NEXT: VTX_READ_128 T35.XYZW, T35.X, 0, #1 +; EG-NEXT: ALU clause starting at 14: +; EG-NEXT: MOV * T0.Y, T16.X, +; EG-NEXT: MOV * T35.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 16: +; EG-NEXT: AND_INT T0.W, T37.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 255(3.573311e-43), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T16.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T0.W, T37.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV T0.Y, T17.X, ; EG-NEXT: MOV * T0.W, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT * T13.W, T11.Y, literal.x, PV.W, +; EG-NEXT: BFE_UINT T1.W, T37.X, literal.x, PV.W, +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), -65536(nan) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T37.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T36.Y, PV.W, PS, +; EG-NEXT: MOV T17.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T12.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T37.Y, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T12.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T37.Y, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT T13.Y, T11.X, literal.x, T0.W, -; EG-NEXT: AND_INT T13.Z, T11.Y, literal.y, -; EG-NEXT: BFE_UINT * T14.W, T11.W, literal.x, T0.W, -; EG-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; EG-NEXT: AND_INT T13.X, T11.X, literal.x, -; EG-NEXT: BFE_UINT T14.Y, T11.Z, literal.y, T0.W, -; EG-NEXT: LSHR * T11.X, KC0[2].Y, literal.z, -; EG-NEXT: 255(3.573311e-43), 8(1.121039e-44) -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) -; EG-NEXT: AND_INT T14.Z, T11.W, literal.x, -; EG-NEXT: BFE_UINT * T15.W, T12.Y, literal.y, T0.W, -; EG-NEXT: 255(3.573311e-43), 8(1.121039e-44) -; EG-NEXT: AND_INT T14.X, T11.Z, literal.x, -; EG-NEXT: BFE_UINT T15.Y, T12.X, literal.y, T0.W, -; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.z, -; EG-NEXT: 255(3.573311e-43), 8(1.121039e-44) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV T0.Y, T13.X, +; EG-NEXT: BFE_UINT * T1.W, T37.Y, literal.x, T0.W, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T16.X, PV.W, literal.x, -; EG-NEXT: AND_INT T15.Z, T12.Y, literal.y, -; EG-NEXT: BFE_UINT T17.W, T12.W, literal.z, T0.W, -; EG-NEXT: AND_INT * T15.X, T12.X, literal.y, -; EG-NEXT: 2(2.802597e-45), 255(3.573311e-43) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T37.Y, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_UINT T17.Y, T12.Z, literal.x, T0.W, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, -; EG-NEXT: 8(1.121039e-44), 32(4.484155e-44) -; EG-NEXT: LSHR T12.X, PV.W, 
literal.x, -; EG-NEXT: AND_INT T17.Z, T12.W, literal.y, -; EG-NEXT: AND_INT * T17.X, T12.Z, literal.y, -; EG-NEXT: 2(2.802597e-45), 255(3.573311e-43) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T36.W, PV.W, PS, +; EG-NEXT: MOV T13.X, PV.W, +; EG-NEXT: MOV * T0.Y, T8.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T37.Z, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T37.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: BFE_UINT * T1.W, T37.Z, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T37.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T37.Y, PV.W, PS, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T37.W, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T37.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: BFE_UINT * T1.W, T37.W, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: ALU clause starting at 120: +; EG-NEXT: AND_INT * T2.W, T0.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T37.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T37.W, PV.W, PS, +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T0.Y, T32.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T35.X, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T32.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T35.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T32.X, PV.W, +; EG-NEXT: MOV T0.Y, T33.X, +; EG-NEXT: BFE_UINT * T1.W, T35.X, literal.x, T0.W, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, 
+; EG-NEXT: MOV * T33.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T35.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T38.Y, PV.W, PS, +; EG-NEXT: MOV T33.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T28.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T35.Y, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T28.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T35.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T28.X, PV.W, +; EG-NEXT: MOV T0.Y, T29.X, +; EG-NEXT: BFE_UINT * T1.W, T35.Y, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T29.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T35.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T38.W, PV.W, PS, +; EG-NEXT: MOV T29.X, PV.W, +; EG-NEXT: MOV * T0.Y, T24.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T35.Z, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T24.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHL * T1.W, T35.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T24.X, PV.W, +; EG-NEXT: MOV T0.Y, T25.X, +; EG-NEXT: BFE_UINT * T1.W, T35.Z, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T2.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, PV.W, T1.W, +; EG-NEXT: MOV * T25.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T1.W, T35.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T35.Y, PV.W, PS, +; EG-NEXT: MOV T25.X, PV.Y, +; EG-NEXT: MOV * T0.Y, T20.X, +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T2.W, T35.W, literal.y, +; EG-NEXT: -65536(nan), 255(3.573311e-43) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV * T20.X, PV.W, +; EG-NEXT: ALU clause starting at 225: +; EG-NEXT: MOV T0.Y, T20.X, +; EG-NEXT: LSHL * T1.W, T35.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; EG-NEXT: OR_INT * T1.W, PV.W, PS, +; EG-NEXT: MOV T20.X, PV.W, +; EG-NEXT: MOV T0.Y, T21.X, +; EG-NEXT: BFE_UINT * T0.W, T35.W, literal.x, T0.W, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PV.W, T0.W, +; 
EG-NEXT: MOV * T21.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, -; EG-NEXT: 48(6.726233e-44), 0(0.000000e+00) -; EG-NEXT: LSHR * T18.X, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: LSHR T39.X, PV.W, literal.x, +; EG-NEXT: LSHR * T40.X, KC0[2].Y, literal.x, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: LSHR T0.W, T35.W, literal.x, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 48(6.726233e-44) +; EG-NEXT: LSHR T41.X, PS, literal.x, +; EG-NEXT: AND_INT T0.Z, T0.Y, literal.y, +; EG-NEXT: AND_INT T0.W, PV.W, literal.z, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.w, +; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; EG-NEXT: 16711680(2.341805e-38), 32(4.484155e-44) +; EG-NEXT: LSHR T42.X, PS, literal.x, +; EG-NEXT: OR_INT * T35.W, PV.Z, PV.W, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T21.X, PV.W, +; EG-NEXT: MOV * T36.X, T16.X, +; EG-NEXT: MOV * T36.Z, T12.X, +; EG-NEXT: MOV T37.X, T8.X, +; EG-NEXT: MOV T37.Z, T4.X, BS:VEC_120/SCL_212 +; EG-NEXT: MOV * T38.X, T32.X, +; EG-NEXT: MOV * T38.Z, T28.X, +; EG-NEXT: MOV T35.X, T24.X, +; EG-NEXT: MOV * T35.Z, T20.X, BS:VEC_120/SCL_212 ; ; CM-LABEL: global_zextload_v32i8_to_v32i16: ; CM: ; %bb.0: -; CM-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] -; CM-NEXT: TEX 1 @8 -; CM-NEXT: ALU 39, @13, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T17, T12.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T11, T18.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T14, T16.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T13, T15.X +; CM-NEXT: ALU 1, @14, KC0[CB0:0-32], KC1[] +; CM-NEXT: TEX 1 @10 +; CM-NEXT: ALU 101, @16, KC0[], KC1[] +; CM-NEXT: ALU 101, @118, KC0[], KC1[] +; CM-NEXT: ALU 40, @220, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T36, T42.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T38, T41.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T37, T40.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T35, T39.X ; CM-NEXT: CF_END -; CM-NEXT: Fetch clause starting at 8: -; CM-NEXT: VTX_READ_128 T12.XYZW, T11.X, 0, #1 -; CM-NEXT: VTX_READ_128 T11.XYZW, T11.X, 16, #1 -; CM-NEXT: ALU clause starting at 12: -; CM-NEXT: MOV * T11.X, KC0[2].Z, -; CM-NEXT: ALU clause starting at 13: +; CM-NEXT: Fetch clause starting at 10: +; CM-NEXT: VTX_READ_128 T37.XYZW, T35.X, 16, #1 +; CM-NEXT: VTX_READ_128 T36.XYZW, T35.X, 0, #1 +; CM-NEXT: ALU clause starting at 14: +; CM-NEXT: MOV * T0.Y, T16.X, +; CM-NEXT: MOV * T35.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 16: +; CM-NEXT: AND_INT T0.Z, T37.X, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 255(3.573311e-43), -65536(nan) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV * T16.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T0.W, T37.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T16.X, PV.W, +; CM-NEXT: MOV T0.Y, T17.X, ; CM-NEXT: MOV * T0.W, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_UINT * T13.W, T11.W, literal.x, PV.W, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T37.X, literal.y, PV.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T17.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T37.X, literal.x, ; CM-NEXT: 
8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_UINT T13.Y, T11.Z, literal.x, T0.W, -; CM-NEXT: AND_INT T13.Z, T11.W, literal.y, -; CM-NEXT: BFE_UINT * T14.W, T11.Y, literal.x, T0.W, -; CM-NEXT: 8(1.121039e-44), 255(3.573311e-43) -; CM-NEXT: AND_INT T13.X, T11.Z, literal.x, -; CM-NEXT: BFE_UINT T14.Y, T11.X, literal.y, T0.W, -; CM-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.z, -; CM-NEXT: 255(3.573311e-43), 8(1.121039e-44) -; CM-NEXT: 48(6.726233e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T15.X, PV.W, literal.x, -; CM-NEXT: AND_INT T14.Z, T11.Y, literal.y, -; CM-NEXT: BFE_UINT * T11.W, T12.W, literal.z, T0.W, -; CM-NEXT: 2(2.802597e-45), 255(3.573311e-43) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T35.Y, PV.Z, PV.W, +; CM-NEXT: MOV T17.X, PV.Y, +; CM-NEXT: MOV * T0.Y, T12.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T37.Y, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T12.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T37.Y, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: AND_INT T14.X, T11.X, literal.x, -; CM-NEXT: BFE_UINT T11.Y, T12.Z, literal.y, T0.W, -; CM-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.z, -; CM-NEXT: 255(3.573311e-43), 8(1.121039e-44) -; CM-NEXT: 32(4.484155e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T16.X, PV.W, literal.x, -; CM-NEXT: AND_INT T11.Z, T12.W, literal.y, -; CM-NEXT: BFE_UINT * T17.W, T12.Y, literal.z, T0.W, -; CM-NEXT: 2(2.802597e-45), 255(3.573311e-43) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T12.X, PV.W, +; CM-NEXT: MOV * T0.Y, T13.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T37.Y, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T13.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T37.Y, literal.x, ; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: AND_INT T11.X, T12.Z, literal.x, -; CM-NEXT: BFE_UINT T17.Y, T12.X, literal.y, T0.W, -; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, -; CM-NEXT: 255(3.573311e-43), 8(1.121039e-44) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T35.W, PV.Z, PV.W, +; CM-NEXT: MOV T13.X, PV.W, +; CM-NEXT: MOV * T0.Y, T8.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T37.Z, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T8.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T37.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T8.X, PV.W, +; CM-NEXT: MOV * T0.Y, T9.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T37.Z, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T9.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T37.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, 
PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T37.Y, PV.Z, PV.W, +; CM-NEXT: MOV T9.X, PV.Y, +; CM-NEXT: MOV * T0.Y, T4.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T37.W, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T4.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T37.W, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV * T0.Y, T5.X, +; CM-NEXT: AND_INT * T0.Z, PV.Y, literal.x, +; CM-NEXT: -65536(nan), 0(0.000000e+00) +; CM-NEXT: ALU clause starting at 118: +; CM-NEXT: BFE_UINT * T1.W, T37.W, literal.x, T0.W, ; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T18.X, PV.W, literal.x, -; CM-NEXT: AND_INT * T17.Z, T12.Y, literal.y, -; CM-NEXT: 2(2.802597e-45), 255(3.573311e-43) -; CM-NEXT: AND_INT * T17.X, T12.X, literal.x, -; CM-NEXT: 255(3.573311e-43), 0(0.000000e+00) -; CM-NEXT: LSHR * T12.X, KC0[2].Y, literal.x, +; CM-NEXT: OR_INT * T1.W, T0.Z, PV.W, +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T37.W, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T37.W, PV.Z, PV.W, +; CM-NEXT: MOV T5.X, PV.W, +; CM-NEXT: MOV * T0.Y, T32.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T36.X, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T32.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T36.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T32.X, PV.W, +; CM-NEXT: MOV * T0.Y, T33.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T36.X, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T33.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T36.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T38.Y, PV.Z, PV.W, +; CM-NEXT: MOV T33.X, PV.Y, +; CM-NEXT: MOV * T0.Y, T28.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T36.Y, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T28.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T36.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T28.X, PV.W, +; CM-NEXT: MOV * T0.Y, T29.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T36.Y, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * 
T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T29.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T36.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T38.W, PV.Z, PV.W, +; CM-NEXT: MOV T29.X, PV.W, +; CM-NEXT: MOV * T0.Y, T24.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T36.Z, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T24.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHL * T1.W, T36.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T24.X, PV.W, +; CM-NEXT: MOV * T0.Y, T25.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T1.W, T36.Z, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T25.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T1.W, T36.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T36.Y, PV.Z, PV.W, +; CM-NEXT: MOV T25.X, PV.Y, +; CM-NEXT: MOV * T0.Y, T20.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, T36.W, literal.y, +; CM-NEXT: -65536(nan), 255(3.573311e-43) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV * T20.X, PV.W, +; CM-NEXT: ALU clause starting at 220: +; CM-NEXT: MOV T0.Y, T20.X, +; CM-NEXT: LSHL * T1.W, T36.W, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T1.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16711680(2.341805e-38) +; CM-NEXT: OR_INT * T1.W, PV.Z, PV.W, +; CM-NEXT: MOV T20.X, PV.W, +; CM-NEXT: MOV * T0.Y, T21.X, +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: BFE_UINT * T0.W, T36.W, literal.y, T0.W, +; CM-NEXT: -65536(nan), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T21.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, +; CM-NEXT: 32(4.484155e-44), 0(0.000000e+00) +; CM-NEXT: LSHR T39.X, PV.W, literal.x, +; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, +; CM-NEXT: 2(2.802597e-45), 48(6.726233e-44) +; CM-NEXT: LSHR T40.X, PV.W, literal.x, +; CM-NEXT: LSHR * T0.W, T36.W, literal.y, +; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; CM-NEXT: LSHR T41.X, KC0[2].Y, literal.x, +; CM-NEXT: AND_INT T0.Y, T0.Y, literal.y, +; CM-NEXT: AND_INT T0.Z, PV.W, literal.z, +; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.w, +; CM-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; CM-NEXT: 16711680(2.341805e-38), 16(2.242078e-44) +; CM-NEXT: LSHR T42.X, PV.W, literal.x, +; CM-NEXT: OR_INT * T36.W, PV.Y, PV.Z, ; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: MOV * T21.X, PV.W, +; CM-NEXT: MOV T35.X, T16.X, +; CM-NEXT: MOV * T35.Z, T12.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV T37.X, T8.X, +; CM-NEXT: MOV * T37.Z, T4.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV T38.X, T32.X, +; CM-NEXT: MOV * T38.Z, T28.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV T36.X, T24.X, +; CM-NEXT: MOV * T36.Z, T20.X, BS:VEC_120/SCL_212 %load = load <32 x i8>, ptr addrspace(1) %in %ext = zext 
<32 x i8> %load to <32 x i16> store <32 x i16> %ext, ptr addrspace(1) %out @@ -11577,8 +12844,8 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NOHSA-VI-NEXT: s_mov_b32 s8, s6 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s9, s7 -; GCN-NOHSA-VI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:16 -; GCN-NOHSA-VI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 +; GCN-NOHSA-VI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 +; GCN-NOHSA-VI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5 ; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4 ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(1) @@ -11586,38 +12853,39 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s7, v1 ; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s6, 16 ; GCN-NOHSA-VI-NEXT: s_lshr_b32 s15, s7, 16 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s28, s7 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s29, s7, 0x80000 ; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s30, s6 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s31, s6, 0x80000 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s7, s7, 16 ; GCN-NOHSA-VI-NEXT: s_ashr_i32 s6, s6, 24 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s14, s14, 0x80000 ; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v3 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s28, s7 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 s29, s7, 0x80000 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s7, s7, 24 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s7, s7, 8 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s15, s15, 0x80000 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s6, s6, 16 ; GCN-NOHSA-VI-NEXT: s_and_b32 s14, 0xffff, s14 ; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0) -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s10, v4 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s11, v5 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s7, s7, 16 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s8, v4 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s9, v5 +; GCN-NOHSA-VI-NEXT: s_and_b32 s7, s7, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s15, 0xffff, s15 ; GCN-NOHSA-VI-NEXT: s_or_b32 s6, s14, s6 ; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s14, s5 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s20, s11 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s22, s10 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s20, s9 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s22, s8 ; GCN-NOHSA-VI-NEXT: s_or_b32 s7, s15, s7 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s14, s14, 8 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s15, s5, 0x80000 ; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v2 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 s21, s11, 0x80000 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 s23, s10, 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s21, s9, 0x80000 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s23, s8, 0x80000 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s20, s20, 8 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s22, s22, 8 ; GCN-NOHSA-VI-NEXT: s_and_b32 s14, s14, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s15, 0xffff, s15 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s8, v6 -; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s9, v7 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s10, v6 +; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s11, v7 ; GCN-NOHSA-VI-NEXT: s_lshr_b32 s12, s4, 16 ; GCN-NOHSA-VI-NEXT: s_lshr_b32 s13, s5, 16 ; GCN-NOHSA-VI-NEXT: s_and_b32 s21, 0xffff, s21 @@ -11627,11 +12895,11 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_or_b32 s14, s15, s14 ; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s15, s4 ; GCN-NOHSA-VI-NEXT: s_ashr_i32 s5, s5, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s8, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s17, s9, 16 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s24, 
s9 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 s25, s9, 0x80000 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s9, s9, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s10, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s17, s11, 16 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s24, s11 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s25, s11, 0x80000 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s11, s11, 16 ; GCN-NOHSA-VI-NEXT: s_or_b32 s20, s21, s20 ; GCN-NOHSA-VI-NEXT: s_or_b32 s21, s23, s22 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s15, s15, 8 @@ -11640,13 +12908,12 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s13, s13, 0x80000 ; GCN-NOHSA-VI-NEXT: s_ashr_i32 s4, s4, 24 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s12, s12, 0x80000 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s18, s10, 16 -; GCN-NOHSA-VI-NEXT: s_lshr_b32 s19, s11, 16 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s11, s11, 16 -; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s26, s8 -; GCN-NOHSA-VI-NEXT: s_bfe_i32 s27, s8, 0x80000 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s8, s8, 24 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s9, s9, 8 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s18, s8, 16 +; GCN-NOHSA-VI-NEXT: s_lshr_b32 s19, s9, 16 +; GCN-NOHSA-VI-NEXT: s_sext_i32_i16 s26, s10 +; GCN-NOHSA-VI-NEXT: s_bfe_i32 s27, s10, 0x80000 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s10, s10, 24 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s11, s11, 8 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s17, s17, 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s16, s16, 0x80000 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s28, s28, 8 @@ -11657,16 +12924,16 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_and_b32 s13, 0xffff, s13 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s4, s4, 16 ; GCN-NOHSA-VI-NEXT: s_and_b32 s12, 0xffff, s12 -; GCN-NOHSA-VI-NEXT: s_ashr_i32 s10, s10, 24 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s11, s11, 8 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s9, s9, 24 +; GCN-NOHSA-VI-NEXT: s_ashr_i32 s8, s8, 24 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s19, s19, 0x80000 ; GCN-NOHSA-VI-NEXT: s_bfe_i32 s18, s18, 0x80000 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s24, s24, 8 ; GCN-NOHSA-VI-NEXT: s_lshl_b32 s26, s26, 8 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s8, s8, 16 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s10, s10, 16 ; GCN-NOHSA-VI-NEXT: s_and_b32 s29, 0xffff, s29 ; GCN-NOHSA-VI-NEXT: s_and_b32 s31, 0xffff, s31 -; GCN-NOHSA-VI-NEXT: s_and_b32 s9, s9, 0xffff0000 +; GCN-NOHSA-VI-NEXT: s_and_b32 s11, s11, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s17, 0xffff, s17 ; GCN-NOHSA-VI-NEXT: s_and_b32 s16, 0xffff, s16 ; GCN-NOHSA-VI-NEXT: s_and_b32 s28, s28, 0xffff0000 @@ -11674,161 +12941,702 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i16(ptr addrspace(1) %out ; GCN-NOHSA-VI-NEXT: s_or_b32 s15, s22, s15 ; GCN-NOHSA-VI-NEXT: s_or_b32 s5, s13, s5 ; GCN-NOHSA-VI-NEXT: s_or_b32 s4, s12, s4 -; GCN-NOHSA-VI-NEXT: s_lshl_b32 s10, s10, 16 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s9, s9, 16 +; GCN-NOHSA-VI-NEXT: s_lshl_b32 s8, s8, 16 ; GCN-NOHSA-VI-NEXT: s_and_b32 s25, 0xffff, s25 ; GCN-NOHSA-VI-NEXT: s_and_b32 s27, 0xffff, s27 -; GCN-NOHSA-VI-NEXT: s_and_b32 s11, s11, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s19, 0xffff, s19 ; GCN-NOHSA-VI-NEXT: s_and_b32 s18, 0xffff, s18 ; GCN-NOHSA-VI-NEXT: s_and_b32 s24, s24, 0xffff0000 ; GCN-NOHSA-VI-NEXT: s_and_b32 s26, s26, 0xffff0000 -; GCN-NOHSA-VI-NEXT: s_or_b32 s9, s17, s9 -; GCN-NOHSA-VI-NEXT: s_or_b32 s8, s16, s8 +; GCN-NOHSA-VI-NEXT: s_or_b32 s11, s17, s11 +; GCN-NOHSA-VI-NEXT: s_or_b32 s10, s16, s10 ; GCN-NOHSA-VI-NEXT: s_or_b32 s16, s29, s28 ; GCN-NOHSA-VI-NEXT: s_or_b32 s17, s31, s30 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s15 ; GCN-NOHSA-VI-NEXT: 
v_mov_b32_e32 v1, s4 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s14 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s5 -; GCN-NOHSA-VI-NEXT: s_or_b32 s11, s19, s11 -; GCN-NOHSA-VI-NEXT: s_or_b32 s10, s18, s10 +; GCN-NOHSA-VI-NEXT: s_or_b32 s9, s19, s9 +; GCN-NOHSA-VI-NEXT: s_or_b32 s8, s18, s8 ; GCN-NOHSA-VI-NEXT: s_or_b32 s18, s25, s24 ; GCN-NOHSA-VI-NEXT: s_or_b32 s19, s27, s26 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 ; GCN-NOHSA-VI-NEXT: s_nop 0 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s17 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s6 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s16 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s7 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; GCN-NOHSA-VI-NEXT: s_nop 0 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s19 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s8 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s10 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s18 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s9 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s11 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48 ; GCN-NOHSA-VI-NEXT: s_nop 0 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s21 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s10 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s8 ; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s20 -; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s11 -; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 +; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s9 +; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32 ; GCN-NOHSA-VI-NEXT: s_endpgm ; ; EG-LABEL: global_sextload_v32i8_to_v32i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 1 @8 -; EG-NEXT: ALU 39, @13, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T18.XYZW, T12.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T17.XYZW, T11.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T16.XYZW, T14.X, 0 -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T15.XYZW, T13.X, 1 +; EG-NEXT: ALU 1, @14, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 1 @10 +; EG-NEXT: ALU 104, @16, KC0[], KC1[] +; EG-NEXT: ALU 104, @121, KC0[], KC1[] +; EG-NEXT: ALU 95, @226, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T36.XYZW, T42.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T37.XYZW, T41.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T38.XYZW, T40.X, 0 +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T35.XYZW, T39.X, 1 ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 8: -; EG-NEXT: VTX_READ_128 T12.XYZW, T11.X, 16, #1 -; EG-NEXT: VTX_READ_128 T11.XYZW, T11.X, 0, #1 -; EG-NEXT: ALU clause starting at 12: -; EG-NEXT: MOV * T11.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 13: -; EG-NEXT: LSHR T13.X, KC0[2].Y, literal.x, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, -; EG-NEXT: 2(2.802597e-45), 16(2.242078e-44) -; EG-NEXT: LSHR T14.X, PV.W, literal.x, -; EG-NEXT: BFE_INT * T15.Z, T11.Y, 0.0, literal.y, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) -; EG-NEXT: BFE_INT T15.X, T11.X, 0.0, literal.x, -; EG-NEXT: LSHR T0.Y, T12.W, literal.x, -; EG-NEXT: BFE_INT T16.Z, T11.W, 0.0, literal.x, BS:VEC_120/SCL_212 -; EG-NEXT: LSHR T0.W, T12.Y, literal.x, -; EG-NEXT: LSHR * T1.W, T11.Y, literal.x, +; EG-NEXT: Fetch clause starting at 10: +; EG-NEXT: VTX_READ_128 T37.XYZW, T35.X, 16, #1 +; EG-NEXT: VTX_READ_128 T35.XYZW, T35.X, 0, #1 +; EG-NEXT: ALU clause 
starting at 14: +; EG-NEXT: MOV * T0.Y, T16.X, +; EG-NEXT: MOV * T35.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 16: +; EG-NEXT: BFE_INT * T0.W, T37.X, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T16.X, T11.Z, 0.0, literal.x, -; EG-NEXT: LSHR T1.Y, T11.W, literal.x, -; EG-NEXT: BFE_INT T17.Z, T12.Y, 0.0, literal.x, -; EG-NEXT: BFE_INT T15.W, PS, 0.0, literal.x, -; EG-NEXT: LSHR * T1.W, T11.X, literal.x, +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T16.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T37.X, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T17.X, T12.X, 0.0, literal.x, -; EG-NEXT: BFE_INT T15.Y, PS, 0.0, literal.x, -; EG-NEXT: BFE_INT T18.Z, T12.W, 0.0, literal.x, -; EG-NEXT: BFE_INT T16.W, PV.Y, 0.0, literal.x, -; EG-NEXT: LSHR * T1.W, T11.Z, literal.x, +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T16.X, PV.W, +; EG-NEXT: MOV T0.Y, T17.X, +; EG-NEXT: LSHR * T0.W, T37.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T17.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T37.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T36.Y, PV.W, PS, +; EG-NEXT: MOV T17.X, PV.Y, +; EG-NEXT: MOV T0.Y, T12.X, +; EG-NEXT: BFE_INT * T0.W, T37.Y, 0.0, literal.x, ; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; EG-NEXT: BFE_INT T18.X, T12.Z, 0.0, literal.x, -; EG-NEXT: BFE_INT T16.Y, PS, 0.0, literal.x, -; EG-NEXT: LSHR T0.Z, T12.X, literal.x, -; EG-NEXT: BFE_INT T17.W, T0.W, 0.0, literal.x, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, -; EG-NEXT: 8(1.121039e-44), 32(4.484155e-44) -; EG-NEXT: LSHR T11.X, PS, literal.x, -; EG-NEXT: BFE_INT T17.Y, PV.Z, 0.0, literal.y, -; EG-NEXT: LSHR T0.Z, T12.Z, literal.y, -; EG-NEXT: BFE_INT T18.W, T0.Y, 0.0, literal.y, -; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) -; EG-NEXT: 48(6.726233e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T12.X, PS, literal.x, -; EG-NEXT: BFE_INT * T18.Y, PV.Z, 0.0, literal.y, -; EG-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T12.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T37.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T12.X, PV.W, +; EG-NEXT: MOV T0.Y, 
T13.X, +; EG-NEXT: LSHR * T0.W, T37.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T13.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T37.Y, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T36.W, PV.W, PS, +; EG-NEXT: MOV T13.X, PV.W, +; EG-NEXT: MOV T0.Y, T8.X, +; EG-NEXT: BFE_INT * T0.W, T37.Z, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T8.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T37.Z, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T8.X, PV.W, +; EG-NEXT: MOV T0.Y, T9.X, +; EG-NEXT: LSHR * T0.W, T37.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T9.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T37.Z, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: ALU clause starting at 121: +; EG-NEXT: OR_INT * T37.Y, T1.W, T0.W, +; EG-NEXT: MOV T9.X, PV.Y, +; EG-NEXT: MOV T0.Y, T4.X, +; EG-NEXT: BFE_INT * T0.W, T37.W, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T37.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T4.X, PV.W, +; EG-NEXT: MOV T0.Y, T5.X, +; EG-NEXT: LSHR * T0.W, T37.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T37.W, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * 
T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T37.W, PV.W, PS, +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV T0.Y, T32.X, +; EG-NEXT: BFE_INT * T0.W, T35.X, 0.0, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T32.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T35.X, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T32.X, PV.W, +; EG-NEXT: MOV T0.Y, T33.X, +; EG-NEXT: LSHR * T0.W, T35.X, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T33.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T35.X, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T38.Y, PV.W, PS, +; EG-NEXT: MOV T33.X, PV.Y, +; EG-NEXT: MOV T0.Y, T28.X, +; EG-NEXT: BFE_INT * T0.W, T35.Y, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T28.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T35.Y, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T28.X, PV.W, +; EG-NEXT: MOV T0.Y, T29.X, +; EG-NEXT: LSHR * T0.W, T35.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T29.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T35.Y, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: ALU clause starting at 226: +; EG-NEXT: AND_INT T1.W, T0.Y, literal.x, +; EG-NEXT: LSHL * T0.W, T0.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T38.W, PV.W, PS, +; EG-NEXT: MOV T29.X, PV.W, +; EG-NEXT: MOV T0.Y, T24.X, +; EG-NEXT: BFE_INT * T0.W, T35.Z, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T24.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T35.Z, 
literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T24.X, PV.W, +; EG-NEXT: MOV T0.Y, T25.X, +; EG-NEXT: LSHR * T0.W, T35.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T25.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ASHR * T0.W, T35.Z, literal.x, +; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: LSHL * T0.W, PV.W, literal.y, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: OR_INT * T35.Y, PV.W, PS, +; EG-NEXT: MOV T25.X, PV.Y, +; EG-NEXT: MOV T0.Y, T20.X, +; EG-NEXT: BFE_INT * T0.W, T35.W, 0.0, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: AND_INT T1.W, PV.Y, literal.x, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.y, +; EG-NEXT: -65536(nan), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV * T20.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T35.W, literal.x, +; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), 65535(9.183409e-41) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T20.X, PV.W, +; EG-NEXT: MOV T0.Y, T21.X, +; EG-NEXT: LSHR * T0.W, T35.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.y, +; EG-NEXT: 8(1.121039e-44), -65536(nan) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T21.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: LSHR T39.X, PV.W, literal.x, +; EG-NEXT: LSHR * T40.X, KC0[2].Y, literal.x, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: ASHR T0.W, T35.W, literal.x, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.y, +; EG-NEXT: 24(3.363116e-44), 48(6.726233e-44) +; EG-NEXT: LSHR T41.X, PS, literal.x, +; EG-NEXT: AND_INT T0.Z, T0.Y, literal.y, +; EG-NEXT: LSHL T0.W, PV.W, literal.z, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, literal.w, +; EG-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; EG-NEXT: 16(2.242078e-44), 32(4.484155e-44) +; EG-NEXT: LSHR T42.X, PS, literal.x, +; EG-NEXT: OR_INT * T35.W, PV.Z, PV.W, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T21.X, PV.W, +; EG-NEXT: MOV * T36.X, T16.X, +; EG-NEXT: MOV * T36.Z, T12.X, +; EG-NEXT: MOV T37.X, T8.X, +; EG-NEXT: MOV T37.Z, T4.X, BS:VEC_120/SCL_212 +; EG-NEXT: MOV * T38.X, T32.X, +; EG-NEXT: MOV * T38.Z, T28.X, +; EG-NEXT: MOV T35.X, T24.X, +; EG-NEXT: MOV * T35.Z, T20.X, BS:VEC_120/SCL_212 ; ; CM-LABEL: global_sextload_v32i8_to_v32i16: ; CM: ; %bb.0: -; CM-NEXT: ALU 0, @12, KC0[CB0:0-32], KC1[] -; CM-NEXT: TEX 1 @8 -; CM-NEXT: ALU 40, @13, KC0[CB0:0-32], KC1[] -; CM-NEXT: MEM_RAT_CACHELESS 
STORE_DWORD T17, T11.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T12, T18.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T16, T14.X -; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T15, T13.X +; CM-NEXT: ALU 1, @14, KC0[CB0:0-32], KC1[] +; CM-NEXT: TEX 1 @10 +; CM-NEXT: ALU 104, @16, KC0[], KC1[] +; CM-NEXT: ALU 104, @121, KC0[], KC1[] +; CM-NEXT: ALU 95, @226, KC0[CB0:0-32], KC1[] +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T35, T42.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T38, T41.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T37, T40.X +; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T36, T39.X ; CM-NEXT: CF_END -; CM-NEXT: Fetch clause starting at 8: -; CM-NEXT: VTX_READ_128 T12.XYZW, T11.X, 16, #1 -; CM-NEXT: VTX_READ_128 T11.XYZW, T11.X, 0, #1 -; CM-NEXT: ALU clause starting at 12: -; CM-NEXT: MOV * T11.X, KC0[2].Z, -; CM-NEXT: ALU clause starting at 13: +; CM-NEXT: Fetch clause starting at 10: +; CM-NEXT: VTX_READ_128 T37.XYZW, T35.X, 16, #1 +; CM-NEXT: VTX_READ_128 T35.XYZW, T35.X, 0, #1 +; CM-NEXT: ALU clause starting at 14: +; CM-NEXT: MOV * T0.Y, T16.X, +; CM-NEXT: MOV * T35.X, KC0[2].Z, +; CM-NEXT: ALU clause starting at 16: +; CM-NEXT: BFE_INT * T0.W, T37.X, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.W, literal.x, +; CM-NEXT: AND_INT * T0.W, T0.Y, literal.y, +; CM-NEXT: 65535(9.183409e-41), -65536(nan) +; CM-NEXT: OR_INT * T0.W, PV.W, PV.Z, +; CM-NEXT: MOV * T16.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T37.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T16.X, PV.W, +; CM-NEXT: MOV T0.Y, T17.X, +; CM-NEXT: LSHR * T0.W, T37.X, literal.x, BS:VEC_120/SCL_212 +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T17.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T37.X, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T36.Y, PV.Z, PV.W, +; CM-NEXT: MOV T17.X, PV.Y, +; CM-NEXT: MOV T0.Y, T12.X, +; CM-NEXT: BFE_INT * T0.W, T37.Y, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T12.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T37.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T12.X, PV.W, +; CM-NEXT: MOV T0.Y, T13.X, +; CM-NEXT: LSHR * T0.W, T37.Y, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 
0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T13.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T37.Y, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T36.W, PV.Z, PV.W, +; CM-NEXT: MOV T13.X, PV.W, +; CM-NEXT: MOV T0.Y, T8.X, +; CM-NEXT: BFE_INT * T0.W, T37.Z, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T8.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T37.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T8.X, PV.W, +; CM-NEXT: MOV T0.Y, T9.X, +; CM-NEXT: LSHR * T0.W, T37.Z, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T9.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T37.Z, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: ALU clause starting at 121: +; CM-NEXT: OR_INT * T37.Y, T0.Z, T0.W, +; CM-NEXT: MOV T9.X, PV.Y, +; CM-NEXT: MOV T0.Y, T4.X, +; CM-NEXT: BFE_INT * T0.W, T37.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T4.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T37.W, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T4.X, PV.W, +; CM-NEXT: MOV T0.Y, T5.X, +; CM-NEXT: LSHR * T0.W, T37.W, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T5.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T37.W, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T37.W, PV.Z, PV.W, +; CM-NEXT: MOV T5.X, PV.W, +; CM-NEXT: MOV T0.Y, T32.X, 
+; CM-NEXT: BFE_INT * T0.W, T35.X, 0.0, literal.x, BS:VEC_120/SCL_212 +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T32.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T35.X, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T32.X, PV.W, +; CM-NEXT: MOV T0.Y, T33.X, +; CM-NEXT: LSHR * T0.W, T35.X, literal.x, BS:VEC_120/SCL_212 +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T33.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T35.X, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T38.Y, PV.Z, PV.W, +; CM-NEXT: MOV T33.X, PV.Y, +; CM-NEXT: MOV T0.Y, T28.X, +; CM-NEXT: BFE_INT * T0.W, T35.Y, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T28.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T35.Y, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T28.X, PV.W, +; CM-NEXT: MOV T0.Y, T29.X, +; CM-NEXT: LSHR * T0.W, T35.Y, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T29.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T35.Y, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: ALU clause starting at 226: +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, T0.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T38.W, PV.Z, PV.W, +; CM-NEXT: MOV T29.X, PV.W, +; CM-NEXT: MOV T0.Y, T24.X, +; CM-NEXT: BFE_INT * T0.W, T35.Z, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T24.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T35.Z, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; 
CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T24.X, PV.W, +; CM-NEXT: MOV T0.Y, T25.X, +; CM-NEXT: LSHR * T0.W, T35.Z, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T25.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: ASHR * T0.W, T35.Z, literal.x, +; CM-NEXT: 24(3.363116e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T35.Y, PV.Z, PV.W, +; CM-NEXT: MOV T25.X, PV.Y, +; CM-NEXT: MOV T0.Y, T20.X, +; CM-NEXT: BFE_INT * T0.W, T35.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, PV.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T20.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, +; CM-NEXT: LSHR * T0.W, T35.W, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: LSHL * T0.W, PV.W, literal.y, +; CM-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV T20.X, PV.W, +; CM-NEXT: MOV T0.Y, T21.X, +; CM-NEXT: LSHR * T0.W, T35.W, literal.x, +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x, +; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) +; CM-NEXT: AND_INT T0.Z, T0.Y, literal.x, +; CM-NEXT: AND_INT * T0.W, PV.W, literal.y, +; CM-NEXT: -65536(nan), 65535(9.183409e-41) +; CM-NEXT: OR_INT * T0.W, PV.Z, PV.W, +; CM-NEXT: MOV * T21.X, PV.W, +; CM-NEXT: MOV T0.Y, PV.X, ; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, -; CM-NEXT: 48(6.726233e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T13.X, PV.W, literal.x, -; CM-NEXT: LSHR T0.Y, T11.Y, literal.y, -; CM-NEXT: LSHR T0.Z, T11.Z, literal.y, -; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, -; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) ; CM-NEXT: 32(4.484155e-44), 0(0.000000e+00) -; CM-NEXT: LSHR T14.X, PV.W, literal.x, -; CM-NEXT: LSHR T1.Y, T11.W, literal.y, -; CM-NEXT: BFE_INT T15.Z, T12.W, 0.0, literal.y, BS:VEC_120/SCL_212 -; CM-NEXT: LSHR * T0.W, T12.X, literal.y, -; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) -; CM-NEXT: BFE_INT T15.X, T12.Z, 0.0, literal.x, -; CM-NEXT: LSHR T2.Y, T12.Y, literal.x, -; CM-NEXT: BFE_INT T16.Z, T12.Y, 0.0, literal.x, -; CM-NEXT: LSHR * T1.W, T12.W, literal.x, -; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_INT T16.X, T12.X, 0.0, literal.x, -; CM-NEXT: LSHR T3.Y, T12.Z, literal.x, -; CM-NEXT: BFE_INT T12.Z, T11.W, 0.0, literal.x, -; CM-NEXT: BFE_INT * T15.W, PV.W, 0.0, literal.x, -; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_INT T12.X, T11.Z, 0.0, literal.x, -; CM-NEXT: BFE_INT T15.Y, PV.Y, 0.0, literal.x, -; CM-NEXT: BFE_INT T17.Z, T11.Y, 0.0, literal.x, -; CM-NEXT: BFE_INT * T16.W, T2.Y, 0.0, literal.x, BS:VEC_120/SCL_212 -; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00) -; CM-NEXT: BFE_INT T17.X, T11.X, 0.0, literal.x, -; CM-NEXT: BFE_INT T16.Y, T0.W, 0.0, literal.x, -; 
CM-NEXT: ADD_INT T1.Z, KC0[2].Y, literal.y, -; CM-NEXT: BFE_INT * T12.W, T1.Y, 0.0, literal.x, -; CM-NEXT: 8(1.121039e-44), 16(2.242078e-44) -; CM-NEXT: LSHR T18.X, PV.Z, literal.x, -; CM-NEXT: BFE_INT T12.Y, T0.Z, 0.0, literal.y, -; CM-NEXT: LSHR T0.Z, T11.X, literal.y, -; CM-NEXT: BFE_INT * T17.W, T0.Y, 0.0, literal.y, -; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) -; CM-NEXT: LSHR T11.X, KC0[2].Y, literal.x, -; CM-NEXT: BFE_INT * T17.Y, PV.Z, 0.0, literal.y, -; CM-NEXT: 2(2.802597e-45), 8(1.121039e-44) +; CM-NEXT: LSHR T39.X, PV.W, literal.x, +; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.y, +; CM-NEXT: 2(2.802597e-45), 48(6.726233e-44) +; CM-NEXT: LSHR T40.X, PV.W, literal.x, +; CM-NEXT: ASHR * T0.W, T35.W, literal.y, +; CM-NEXT: 2(2.802597e-45), 24(3.363116e-44) +; CM-NEXT: LSHR T41.X, KC0[2].Y, literal.x, +; CM-NEXT: AND_INT T0.Y, T0.Y, literal.y, +; CM-NEXT: LSHL T0.Z, PV.W, literal.z, +; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z, +; CM-NEXT: 2(2.802597e-45), 65535(9.183409e-41) +; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; CM-NEXT: LSHR T42.X, PV.W, literal.x, +; CM-NEXT: OR_INT * T35.W, PV.Y, PV.Z, +; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; CM-NEXT: MOV * T21.X, PV.W, +; CM-NEXT: MOV T36.X, T16.X, +; CM-NEXT: MOV * T36.Z, T12.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV T37.X, T8.X, +; CM-NEXT: MOV * T37.Z, T4.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV T38.X, T32.X, +; CM-NEXT: MOV * T38.Z, T28.X, BS:VEC_120/SCL_212 +; CM-NEXT: MOV T35.X, T24.X, +; CM-NEXT: MOV * T35.Z, T20.X, BS:VEC_120/SCL_212 %load = load <32 x i8>, ptr addrspace(1) %in %ext = sext <32 x i8> %load to <32 x i16> store <32 x i16> %ext, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll index 8dcecfe..ddd1ce66 100644 --- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll +++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll @@ -151,19 +151,27 @@ define amdgpu_kernel void @local_load_v3i16(ptr addrspace(3) %out, ptr addrspace ; ; EG-LABEL: local_load_v3i16: ; EG: ; %bb.0: ; %entry -; EG-NEXT: ALU 11, @2, KC0[CB0:0-32], KC1[] -; EG-NEXT: ADD_INT * T0.W, KC0[2].Z, literal.x, -; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00) -; EG-NEXT: LDS_USHORT_READ_RET * OQAP, T0.W -; EG-NEXT: MOV T0.X, OQAP, +; EG-NEXT: ALU 19, @2, KC0[CB0:0-32], KC1[] ; EG-NEXT: MOV * T0.W, KC0[2].Z, ; EG-NEXT: LDS_USHORT_READ_RET * OQAP, T0.W ; EG-NEXT: MOV T0.Y, OQAP, -; EG-NEXT: MOV * T0.W, KC0[2].Y, -; EG-NEXT: LDS_WRITE * T0.W, T0.Y, +; EG-NEXT: ADD_INT * T0.W, KC0[2].Z, literal.x, +; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: LDS_USHORT_READ_RET * OQAP, T0.W +; EG-NEXT: MOV * T0.Z, OQAP, +; EG-NEXT: LSHL T0.Z, PV.Z, literal.x, +; EG-NEXT: AND_INT T0.W, T0.Y, literal.y, +; EG-NEXT: ADD_INT * T1.W, KC0[2].Z, literal.z, +; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00) +; EG-NEXT: LDS_USHORT_READ_RET * OQAP, T1.W +; EG-NEXT: MOV T0.Y, OQAP, +; EG-NEXT: OR_INT T0.W, T0.Z, T0.W, +; EG-NEXT: MOV * T1.W, KC0[2].Y, +; EG-NEXT: LDS_WRITE * T1.W, T0.W, ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x, ; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00) -; EG-NEXT: LDS_SHORT_WRITE * T0.W, T0.X, +; EG-NEXT: LDS_SHORT_WRITE * T0.W, T0.Y, ; EG-NEXT: RETURN entry: %ld = load <3 x i16>, ptr addrspace(3) %in @@ -1075,12 +1083,12 @@ define amdgpu_kernel void @local_sextload_v4i16_to_v4i32(ptr addrspace(3) %out, ; SI-NEXT: s_mov_b32 m0, -1 ; SI-NEXT: ds_read_b64 v[0:1], v0 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: v_ashrrev_i32_e32 v2, 16, v1 -; SI-NEXT: 
v_ashrrev_i32_e32 v4, 16, v0 -; SI-NEXT: v_bfe_i32 v1, v1, 0, 16 -; SI-NEXT: v_bfe_i32 v3, v0, 0, 16 +; SI-NEXT: v_ashrrev_i32_e32 v3, 16, v0 +; SI-NEXT: v_ashrrev_i32_e32 v5, 16, v1 +; SI-NEXT: v_bfe_i32 v2, v0, 0, 16 +; SI-NEXT: v_bfe_i32 v4, v1, 0, 16 ; SI-NEXT: v_mov_b32_e32 v0, s0 -; SI-NEXT: ds_write2_b64 v0, v[3:4], v[1:2] offset1:1 +; SI-NEXT: ds_write2_b64 v0, v[2:3], v[4:5] offset1:1 ; SI-NEXT: s_endpgm ; ; VI-NO-DS128-LABEL: local_sextload_v4i16_to_v4i32: @@ -6145,11 +6153,11 @@ define amdgpu_kernel void @local_sextload_v8i16_to_v8i64(ptr addrspace(3) %out, ; SI-NEXT: v_bfe_i32 v8, v1, 0, 16 ; SI-NEXT: v_bfe_i32 v2, v2, 0, 16 ; SI-NEXT: v_bfe_i32 v10, v9, 0, 16 -; SI-NEXT: v_bfe_i32 v12, v12, 0, 16 -; SI-NEXT: v_bfe_i32 v14, v11, 0, 16 ; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; SI-NEXT: v_bfe_i32 v12, v12, 0, 16 ; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 ; SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2 +; SI-NEXT: v_bfe_i32 v14, v11, 0, 16 ; SI-NEXT: v_ashrrev_i32_e32 v11, 31, v10 ; SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 ; SI-NEXT: v_ashrrev_i32_e32 v15, 31, v14 @@ -6811,10 +6819,10 @@ define amdgpu_kernel void @local_sextload_v16i16_to_v16i64(ptr addrspace(3) %out ; SI-NEXT: v_mov_b32_e32 v18, s0 ; SI-NEXT: s_waitcnt lgkmcnt(1) ; SI-NEXT: v_mov_b32_e32 v12, v3 -; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v14, v7 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v2 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v14, v7 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v6 ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v4 ; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v5 @@ -6837,24 +6845,24 @@ define amdgpu_kernel void @local_sextload_v16i16_to_v16i64(ptr addrspace(3) %out ; SI-NEXT: v_bfe_i32 v1, v4, 0, 16 ; SI-NEXT: v_bfe_i32 v3, v5, 0, 16 ; SI-NEXT: v_bfe_i32 v5, v6, 0, 16 -; SI-NEXT: v_bfe_i32 v7, v0, 0, 16 -; SI-NEXT: v_bfe_i32 v10, v2, 0, 16 +; SI-NEXT: v_bfe_i32 v10, v0, 0, 16 +; SI-NEXT: v_bfe_i32 v7, v2, 0, 16 ; SI-NEXT: v_bfe_i32 v12, v19, 0, 16 +; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1 +; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3 ; SI-NEXT: v_bfe_i32 v14, v17, 0, 16 +; SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; SI-NEXT: v_bfe_i32 v16, v16, 0, 16 -; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3 +; SI-NEXT: v_ashrrev_i32_e32 v11, 31, v10 ; SI-NEXT: ds_write2_b64 v18, v[3:4], v[8:9] offset0:2 offset1:3 ; SI-NEXT: v_bfe_i32 v3, v15, 0, 16 -; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; SI-NEXT: v_ashrrev_i32_e32 v8, 31, v7 -; SI-NEXT: v_ashrrev_i32_e32 v11, 31, v10 ; SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 ; SI-NEXT: v_ashrrev_i32_e32 v15, 31, v14 ; SI-NEXT: v_ashrrev_i32_e32 v17, 31, v16 ; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3 -; SI-NEXT: ds_write2_b64 v18, v[10:11], v[3:4] offset0:12 offset1:13 -; SI-NEXT: ds_write2_b64 v18, v[7:8], v[16:17] offset0:8 offset1:9 +; SI-NEXT: ds_write2_b64 v18, v[7:8], v[3:4] offset0:12 offset1:13 +; SI-NEXT: ds_write2_b64 v18, v[10:11], v[16:17] offset0:8 offset1:9 ; SI-NEXT: ds_write2_b64 v18, v[5:6], v[14:15] offset0:4 offset1:5 ; SI-NEXT: ds_write2_b64 v18, v[1:2], v[12:13] offset1:1 ; SI-NEXT: s_endpgm @@ -8106,16 +8114,16 @@ define amdgpu_kernel void @local_sextload_v32i16_to_v32i64(ptr addrspace(3) %out ; SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12 ; SI-NEXT: ds_write2_b64 v7, v[10:11], v[12:13] offset0:4 offset1:5 ; SI-NEXT: v_bfe_i32 v11, v6, 0, 16 +; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1 ; SI-NEXT: v_bfe_i32 v13, v4, 0, 16 +; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3 ; SI-NEXT: v_bfe_i32 v15, v15, 0, 16 +; 
SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; SI-NEXT: v_bfe_i32 v16, v14, 0, 16 ; SI-NEXT: v_ashrrev_i32_e32 v10, 31, v9 ; SI-NEXT: v_ashrrev_i32_e32 v17, 31, v16 ; SI-NEXT: ds_write2_b64 v7, v[9:10], v[16:17] offset1:1 ; SI-NEXT: v_bfe_i32 v17, v18, 0, 16 -; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3 -; SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8 ; SI-NEXT: v_ashrrev_i32_e32 v12, 31, v11 ; SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13 diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll index a3ebaec..5f0ca7b 100644 --- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll +++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll @@ -74,7 +74,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp(ptr addrspace(1) %out) { ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_movk_i32 s0, 0x5000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x2000 +; FLATSCR-NEXT: s_addk_i32 s0, 0x3000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[0:1], off, s0 offset:208 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_movk_i32 s0, 0x3000 @@ -175,7 +176,9 @@ define void @func_local_stack_offset_uses_sp(ptr addrspace(1) %out) { ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_cbranch_scc1 .LBB1_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_add_i32 s0, s33, 0x5000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x2000 +; FLATSCR-NEXT: s_add_i32 s1, s33, s0 +; FLATSCR-NEXT: s_add_i32 s0, s1, 0x3000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[2:3], off, s0 offset:208 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_add_i32 s0, s33, 0x3000 @@ -223,30 +226,35 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: s_cbranch_scc1 .LBB2_1 ; MUBUF-NEXT: ; %bb.2: ; %split +; MUBUF-NEXT: s_movk_i32 s5, 0x12d4 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d4, v1 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 +; MUBUF-NEXT: s_movk_i32 s5, 0x12d0 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 ; MUBUF-NEXT: s_movk_i32 s4, 0x4000 ; MUBUF-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d0, v1 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 +; MUBUF-NEXT: s_movk_i32 s5, 0x12c4 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 ; MUBUF-NEXT: s_or_b32 s4, s4, 0x12c0 ; MUBUF-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12c4, v1 -; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 ; MUBUF-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: v_mov_b32_e32 v0, s4 -; MUBUF-NEXT: v_or_b32_e32 v2, 0x12cc, v3 +; MUBUF-NEXT: s_movk_i32 s4, 0x12cc +; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000 +; MUBUF-NEXT: v_or_b32_e32 v2, s4, v3 +; MUBUF-NEXT: s_movk_i32 s4, 0x12c8 ; MUBUF-NEXT: v_mov_b32_e32 v6, 0x4000 ; MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: v_mov_b32_e32 v7, 0x4000 ; MUBUF-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v2, 0x12c8, v6 +; MUBUF-NEXT: v_or_b32_e32 v2, s4, v6 ; MUBUF-NEXT: v_mov_b32_e32 v8, 0x4000 ; MUBUF-NEXT: v_mov_b32_e32 v9, 0x4000 ; MUBUF-NEXT: buffer_load_dword v2, v2, 
s[0:3], 0 offen glc @@ -298,7 +306,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_cbranch_scc1 .LBB2_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_movk_i32 s0, 0x3000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x1000 +; FLATSCR-NEXT: s_addk_i32 s0, 0x2000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[8:9], off, s0 offset:720 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 offset:704 glc diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll index 88c619e..1ae3434 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll @@ -372,9 +372,8 @@ define <2 x half> @v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_precvt(half % ; SDAG-GFX11-TRUE16: ; %bb.0: ; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SDAG-GFX11-TRUE16-NEXT: v_fma_mix_f32 v0, v0, v1, v2 op_sel_hi:[1,1,1] clamp -; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; SDAG-GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 -; SDAG-GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v0 ; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX11-FAKE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_precvt: diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll index 721f974..311527d 100644 --- a/llvm/test/CodeGen/AMDGPU/min.ll +++ b/llvm/test/CodeGen/AMDGPU/min.ll @@ -991,30 +991,81 @@ define amdgpu_kernel void @s_test_imin_sle_v2i16(ptr addrspace(1) %out, <2 x i16 define amdgpu_kernel void @s_test_imin_sle_v4i16(ptr addrspace(1) %out, <4 x i16> %a, <4 x i16> %b) #0 { ; EG-LABEL: s_test_imin_sle_v4i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @14, KC0[], KC1[] -; EG-NEXT: TEX 3 @6 -; EG-NEXT: ALU 9, @15, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1 +; EG-NEXT: ALU 1, @28, KC0[], KC1[] +; EG-NEXT: TEX 1 @12 +; EG-NEXT: ALU 9, @30, KC0[], KC1[] +; EG-NEXT: TEX 1 @16 +; EG-NEXT: ALU 10, @40, KC0[], KC1[] +; EG-NEXT: TEX 1 @20 +; EG-NEXT: ALU 10, @51, KC0[], KC1[] +; EG-NEXT: TEX 1 @24 +; EG-NEXT: ALU 11, @62, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XY, T5.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_16 T1.X, T0.X, 46, #3 -; EG-NEXT: VTX_READ_16 T2.X, T0.X, 52, #3 -; EG-NEXT: VTX_READ_16 T3.X, T0.X, 44, #3 -; EG-NEXT: VTX_READ_16 T0.X, T0.X, 54, #3 -; EG-NEXT: ALU clause starting at 14: -; EG-NEXT: MOV * T0.X, 0.0, -; EG-NEXT: ALU clause starting at 15: -; EG-NEXT: BFE_INT T0.Z, T1.X, 0.0, literal.x, -; EG-NEXT: BFE_INT * T0.W, T0.X, 0.0, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: Fetch clause starting at 12: +; EG-NEXT: VTX_READ_16 T6.X, T5.X, 50, #3 +; EG-NEXT: VTX_READ_16 T7.X, T5.X, 58, #3 +; EG-NEXT: Fetch clause starting at 16: +; EG-NEXT: VTX_READ_16 T6.X, T5.X, 48, #3 +; EG-NEXT: VTX_READ_16 T7.X, T5.X, 56, #3 +; EG-NEXT: Fetch clause starting at 20: +; EG-NEXT: VTX_READ_16 T6.X, T5.X, 46, #3 +; EG-NEXT: VTX_READ_16 T7.X, T5.X, 54, #3 +; EG-NEXT: Fetch clause starting at 24: +; EG-NEXT: VTX_READ_16 T6.X, T5.X, 44, #3 +; EG-NEXT: VTX_READ_16 T5.X, T5.X, 52, #3 +; EG-NEXT: ALU clause starting at 28: +; EG-NEXT: MOV * T0.Y, T3.X, +; EG-NEXT: MOV * T5.X, 0.0, +; EG-NEXT: ALU clause starting at 30: +; EG-NEXT: BFE_INT T0.Z, T6.X, 0.0, 
literal.x, +; EG-NEXT: BFE_INT * T0.W, T7.X, 0.0, literal.x, BS:VEC_120/SCL_212 ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: MIN_INT T0.Y, PV.Z, PV.W, -; EG-NEXT: BFE_INT T0.Z, T3.X, 0.0, literal.x, -; EG-NEXT: BFE_INT * T0.W, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: MIN_INT * T0.W, PV.Z, PV.W, +; EG-NEXT: LSHL T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T3.X, PV.W, +; EG-NEXT: MOV * T0.Y, PV.X, +; EG-NEXT: ALU clause starting at 40: +; EG-NEXT: BFE_INT T0.Z, T6.X, 0.0, literal.x, +; EG-NEXT: BFE_INT * T0.W, T7.X, 0.0, literal.x, BS:VEC_120/SCL_212 ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: MIN_INT T0.X, PV.Z, PV.W, -; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x, -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MIN_INT T0.W, PV.Z, PV.W, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T3.X, PV.W, +; EG-NEXT: MOV * T0.Y, T2.X, +; EG-NEXT: ALU clause starting at 51: +; EG-NEXT: BFE_INT T0.Z, T6.X, 0.0, literal.x, +; EG-NEXT: BFE_INT * T0.W, T7.X, 0.0, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: MIN_INT T0.W, PV.Z, PV.W, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T2.X, PV.W, +; EG-NEXT: MOV * T0.Y, PV.X, +; EG-NEXT: ALU clause starting at 62: +; EG-NEXT: BFE_INT T0.Z, T6.X, 0.0, literal.x, +; EG-NEXT: BFE_INT * T0.W, T5.X, 0.0, literal.x, BS:VEC_120/SCL_212 +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: MIN_INT * T0.W, PV.Z, PV.W, +; EG-NEXT: LSHR T5.X, KC0[2].Y, literal.x, +; EG-NEXT: AND_INT T1.W, T0.Y, literal.y, +; EG-NEXT: AND_INT * T0.W, PV.W, literal.z, +; EG-NEXT: 2(2.802597e-45), -65536(nan) +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T6.X, PV.W, PS, +; EG-NEXT: MOV T2.X, PV.X, +; EG-NEXT: MOV * T6.Y, T3.X, ; ; CI-LABEL: s_test_imin_sle_v4i16: ; CI: ; %bb.0: @@ -2154,40 +2205,49 @@ define amdgpu_kernel void @v_test_umin_ule_v3i32(ptr addrspace(1) %out, ptr addr define amdgpu_kernel void @v_test_umin_ule_v3i16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { ; EG-LABEL: v_test_umin_ule_v3i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 3, @14, KC0[CB0:0-32], KC1[] -; EG-NEXT: TEX 3 @6 -; EG-NEXT: ALU 17, @18, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T2.X, 0 -; EG-NEXT: MEM_RAT MSKOR T4.XW, T0.X +; EG-NEXT: ALU 3, @20, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 1 @8 +; EG-NEXT: ALU 11, @24, KC0[CB0:0-32], KC1[] +; EG-NEXT: TEX 3 @12 +; EG-NEXT: ALU 8, @36, KC0[], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.X, T8.X, 0 +; EG-NEXT: MEM_RAT MSKOR T7.XW, T0.X ; EG-NEXT: CF_END -; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_16 T2.X, T1.X, 0, #1 -; EG-NEXT: VTX_READ_16 T3.X, T0.X, 0, #1 -; EG-NEXT: VTX_READ_16 T1.X, T1.X, 4, #1 -; EG-NEXT: VTX_READ_16 T0.X, T0.X, 4, #1 -; EG-NEXT: ALU clause starting at 14: +; EG-NEXT: Fetch clause starting at 8: +; EG-NEXT: VTX_READ_16 T7.X, T6.X, 4, #1 +; EG-NEXT: VTX_READ_16 T8.X, T0.X, 4, #1 +; EG-NEXT: Fetch clause starting at 12: +; EG-NEXT: VTX_READ_16 T8.X, T6.X, 0, #1 +; 
EG-NEXT: VTX_READ_16 T9.X, T0.X, 0, #1 +; EG-NEXT: VTX_READ_16 T6.X, T6.X, 2, #1 +; EG-NEXT: VTX_READ_16 T0.X, T0.X, 2, #1 +; EG-NEXT: ALU clause starting at 20: ; EG-NEXT: LSHL * T0.W, T0.X, literal.x, ; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00) ; EG-NEXT: ADD_INT T0.X, KC0[2].Z, PV.W, -; EG-NEXT: ADD_INT * T1.X, KC0[2].W, PV.W, -; EG-NEXT: ALU clause starting at 18: +; EG-NEXT: ADD_INT * T6.X, KC0[2].W, PV.W, +; EG-NEXT: ALU clause starting at 24: ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, T0.W, ; EG-NEXT: ADD_INT * T1.W, PV.W, literal.x, ; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00) ; EG-NEXT: AND_INT * T2.W, PV.W, literal.x, ; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00) ; EG-NEXT: LSHL T2.W, PV.W, literal.x, -; EG-NEXT: MIN_UINT * T3.W, T0.X, T1.X, +; EG-NEXT: MIN_UINT * T3.W, T8.X, T7.X, ; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00) -; EG-NEXT: LSHL T4.X, PS, PV.W, -; EG-NEXT: LSHL * T4.W, literal.x, PV.W, +; EG-NEXT: LSHL T7.X, PS, PV.W, +; EG-NEXT: LSHL * T7.W, literal.x, PV.W, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: MOV T4.Y, 0.0, -; EG-NEXT: MOV * T4.Z, 0.0, +; EG-NEXT: MOV * T7.Y, 0.0, +; EG-NEXT: ALU clause starting at 36: +; EG-NEXT: MOV T7.Z, 0.0, +; EG-NEXT: MIN_UINT * T2.W, T0.X, T6.X, ; EG-NEXT: LSHR T0.X, T1.W, literal.x, -; EG-NEXT: MIN_UINT * T1.X, T3.X, T2.X, -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) -; EG-NEXT: LSHR * T2.X, T0.W, literal.x, +; EG-NEXT: LSHL T1.W, PV.W, literal.y, +; EG-NEXT: MIN_UINT * T2.W, T9.X, T8.X, +; EG-NEXT: 2(2.802597e-45), 16(2.242078e-44) +; EG-NEXT: OR_INT T6.X, PV.W, PS, +; EG-NEXT: LSHR * T8.X, T0.W, literal.x, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) ; ; CI-LABEL: v_test_umin_ule_v3i16: @@ -3483,46 +3543,142 @@ define amdgpu_kernel void @s_test_umin_ult_v8i32(ptr addrspace(1) %out, <8 x i32 define amdgpu_kernel void @s_test_umin_ult_v8i16(ptr addrspace(1) %out, <8 x i16> %a, <8 x i16> %b) #0 { ; EG-LABEL: s_test_umin_ult_v8i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @24, KC0[], KC1[] -; EG-NEXT: TEX 2 @8 -; EG-NEXT: ALU 2, @25, KC0[], KC1[] -; EG-NEXT: TEX 4 @14 -; EG-NEXT: ALU 14, @28, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1 +; EG-NEXT: ALU 1, @52, KC0[], KC1[] +; EG-NEXT: TEX 1 @20 +; EG-NEXT: ALU 9, @54, KC0[], KC1[] +; EG-NEXT: TEX 1 @24 +; EG-NEXT: ALU 8, @64, KC0[], KC1[] +; EG-NEXT: TEX 1 @28 +; EG-NEXT: ALU 10, @73, KC0[], KC1[] +; EG-NEXT: TEX 1 @32 +; EG-NEXT: ALU 8, @84, KC0[], KC1[] +; EG-NEXT: TEX 1 @36 +; EG-NEXT: ALU 10, @93, KC0[], KC1[] +; EG-NEXT: TEX 1 @40 +; EG-NEXT: ALU 8, @104, KC0[], KC1[] +; EG-NEXT: TEX 1 @44 +; EG-NEXT: ALU 10, @113, KC0[], KC1[] +; EG-NEXT: TEX 1 @48 +; EG-NEXT: ALU 10, @124, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T7.XYZW, T8.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD -; EG-NEXT: Fetch clause starting at 8: -; EG-NEXT: VTX_READ_16 T1.X, T0.X, 62, #3 -; EG-NEXT: VTX_READ_16 T2.X, T0.X, 60, #3 -; EG-NEXT: VTX_READ_16 T3.X, T0.X, 78, #3 -; EG-NEXT: Fetch clause starting at 14: -; EG-NEXT: VTX_READ_16 T1.X, T0.X, 68, #3 -; EG-NEXT: VTX_READ_16 T3.X, T0.X, 52, #3 -; EG-NEXT: VTX_READ_16 T4.X, T0.X, 70, #3 -; EG-NEXT: VTX_READ_16 T5.X, T0.X, 54, #3 -; EG-NEXT: VTX_READ_16 T0.X, T0.X, 76, #3 -; EG-NEXT: ALU clause starting at 24: -; EG-NEXT: MOV * T0.X, 0.0, -; EG-NEXT: ALU clause starting at 25: -; EG-NEXT: AND_INT T0.W, T1.X, literal.x, -; EG-NEXT: AND_INT * T1.W, T3.X, literal.x, +; EG-NEXT: Fetch clause starting at 20: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 66, #3 +; EG-NEXT: VTX_READ_16 T9.X, T7.X, 82, #3 +; EG-NEXT: Fetch clause 
starting at 24: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 64, #3 +; EG-NEXT: VTX_READ_16 T9.X, T7.X, 80, #3 +; EG-NEXT: Fetch clause starting at 28: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 62, #3 +; EG-NEXT: VTX_READ_16 T9.X, T7.X, 78, #3 +; EG-NEXT: Fetch clause starting at 32: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 60, #3 +; EG-NEXT: VTX_READ_16 T9.X, T7.X, 76, #3 +; EG-NEXT: Fetch clause starting at 36: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 58, #3 +; EG-NEXT: VTX_READ_16 T9.X, T7.X, 74, #3 +; EG-NEXT: Fetch clause starting at 40: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 56, #3 +; EG-NEXT: VTX_READ_16 T9.X, T7.X, 72, #3 +; EG-NEXT: Fetch clause starting at 44: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 54, #3 +; EG-NEXT: VTX_READ_16 T9.X, T7.X, 70, #3 +; EG-NEXT: Fetch clause starting at 48: +; EG-NEXT: VTX_READ_16 T8.X, T7.X, 52, #3 +; EG-NEXT: VTX_READ_16 T7.X, T7.X, 68, #3 +; EG-NEXT: ALU clause starting at 52: +; EG-NEXT: MOV * T0.Y, T3.X, +; EG-NEXT: MOV * T7.X, 0.0, +; EG-NEXT: ALU clause starting at 54: +; EG-NEXT: AND_INT T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.X, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: ALU clause starting at 28: -; EG-NEXT: AND_INT T0.Z, T2.X, literal.x, -; EG-NEXT: AND_INT T2.W, T0.X, literal.x, BS:VEC_120/SCL_212 -; EG-NEXT: MIN_UINT * T0.W, T0.W, T1.W, +; EG-NEXT: MIN_UINT * T0.W, PV.W, PS, +; EG-NEXT: LSHL T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T3.X, PV.W, +; EG-NEXT: MOV * T0.Y, PV.X, +; EG-NEXT: ALU clause starting at 64: +; EG-NEXT: AND_INT T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.X, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: MIN_UINT T0.Z, PV.Z, PV.W, -; EG-NEXT: AND_INT T1.W, T5.X, literal.x, -; EG-NEXT: AND_INT * T2.W, T4.X, literal.x, +; EG-NEXT: AND_INT T2.W, T0.Y, literal.x, +; EG-NEXT: MIN_UINT * T0.W, PV.W, PS, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T3.X, PV.W, +; EG-NEXT: MOV * T0.Y, T2.X, +; EG-NEXT: ALU clause starting at 73: +; EG-NEXT: AND_INT T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.X, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: MIN_UINT T0.Y, PV.W, PS, -; EG-NEXT: AND_INT T1.W, T3.X, literal.x, -; EG-NEXT: AND_INT * T2.W, T1.X, literal.x, +; EG-NEXT: MIN_UINT T0.W, PV.W, PS, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: MIN_UINT T0.X, PV.W, PS, -; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x, -; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T2.X, PV.W, +; EG-NEXT: MOV * T0.Y, PV.X, +; EG-NEXT: ALU clause starting at 84: +; EG-NEXT: AND_INT T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, T0.Y, literal.x, +; EG-NEXT: MIN_UINT * T0.W, PV.W, PS, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T7.Z, PV.W, PS, +; EG-NEXT: MOV T2.X, PV.Z, +; EG-NEXT: MOV * T0.Y, T5.X, +; EG-NEXT: ALU clause starting at 93: +; EG-NEXT: AND_INT T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: MIN_UINT T0.W, PV.W, PS, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 
0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T5.X, PV.W, +; EG-NEXT: MOV * T0.Y, PV.X, +; EG-NEXT: ALU clause starting at 104: +; EG-NEXT: AND_INT T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: AND_INT T2.W, T0.Y, literal.x, +; EG-NEXT: MIN_UINT * T0.W, PV.W, PS, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, PV.W, PS, +; EG-NEXT: MOV T5.X, PV.W, +; EG-NEXT: MOV * T0.Y, T4.X, +; EG-NEXT: ALU clause starting at 113: +; EG-NEXT: AND_INT T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: MIN_UINT T0.W, PV.W, PS, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T4.X, PV.W, +; EG-NEXT: MOV * T0.Y, PV.X, +; EG-NEXT: ALU clause starting at 124: +; EG-NEXT: AND_INT T0.W, T8.X, literal.x, +; EG-NEXT: AND_INT * T1.W, T7.X, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHR T8.X, KC0[2].Y, literal.x, +; EG-NEXT: AND_INT T2.W, T0.Y, literal.y, +; EG-NEXT: MIN_UINT * T0.W, PV.W, PS, +; EG-NEXT: 2(2.802597e-45), -65536(nan) +; EG-NEXT: OR_INT * T7.X, PV.W, PS, +; EG-NEXT: MOV T4.X, PV.X, +; EG-NEXT: MOV * T7.W, T3.X, +; EG-NEXT: MOV * T7.Y, T5.X, ; ; CI-LABEL: s_test_umin_ult_v8i16: ; CI: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll new file mode 100644 index 0000000..6d0aa1e --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll @@ -0,0 +1,108 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s + +define protected amdgpu_kernel void @no_folding_imm_to_inst_with_fi(<4 x i64> %val4, <16 x i64> %val16) { +; CHECK-LABEL: no_folding_imm_to_inst_with_fi: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_clause 0x2 +; CHECK-NEXT: s_load_b256 s[36:43], s[4:5], 0x24 +; CHECK-NEXT: s_load_b512 s[16:31], s[4:5], 0xe4 +; CHECK-NEXT: s_load_b512 s[0:15], s[4:5], 0xa4 +; CHECK-NEXT: s_mov_b64 s[34:35], src_private_base +; CHECK-NEXT: s_movk_i32 s33, 0x70 +; CHECK-NEXT: s_movk_i32 s34, 0x60 +; CHECK-NEXT: s_or_b32 s44, 0x80, s33 +; CHECK-NEXT: s_mov_b32 s45, s35 +; CHECK-NEXT: s_or_b32 s46, 0x80, s34 +; CHECK-NEXT: s_mov_b32 s47, s35 +; CHECK-NEXT: v_dual_mov_b32 v20, s44 :: v_dual_mov_b32 v21, s45 +; CHECK-NEXT: v_dual_mov_b32 v22, s46 :: v_dual_mov_b32 v23, s47 +; CHECK-NEXT: s_movk_i32 s34, 0x80 +; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; CHECK-NEXT: v_dual_mov_b32 v34, s34 :: v_dual_mov_b32 v35, s35 +; CHECK-NEXT: s_wait_kmcnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v1, s41 +; CHECK-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43 +; CHECK-NEXT: v_dual_mov_b32 v4, s36 :: v_dual_mov_b32 v5, s37 +; CHECK-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v7, s39 +; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v1, s21 +; CHECK-NEXT: s_movk_i32 s20, 0x50 +; CHECK-NEXT: v_dual_mov_b32 v8, s28 :: v_dual_mov_b32 v9, s29 +; CHECK-NEXT: 
v_dual_mov_b32 v10, s30 :: v_dual_mov_b32 v11, s31 +; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: s_or_b32 s20, 0x80, s20 +; CHECK-NEXT: s_mov_b32 s21, s35 +; CHECK-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25 +; CHECK-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27 +; CHECK-NEXT: v_dual_mov_b32 v2, s22 :: v_dual_mov_b32 v3, s23 +; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: v_dual_mov_b32 v25, s21 :: v_dual_mov_b32 v24, s20 +; CHECK-NEXT: scratch_store_b128 off, v[4:7], off scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[22:23], v[12:15] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[24:25], v[0:3] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17 +; CHECK-NEXT: s_or_b32 s16, 0x80, 64 +; CHECK-NEXT: s_mov_b32 s17, s35 +; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13 +; CHECK-NEXT: s_or_b32 s12, 0x80, 48 +; CHECK-NEXT: s_mov_b32 s13, s35 +; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 +; CHECK-NEXT: s_or_b32 s8, 0x80, 32 +; CHECK-NEXT: s_mov_b32 s9, s35 +; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5 +; CHECK-NEXT: s_or_b32 s4, 0x80, 16 +; CHECK-NEXT: s_mov_b32 s5, s35 +; CHECK-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v3, s19 +; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: v_dual_mov_b32 v27, s17 :: v_dual_mov_b32 v26, s16 +; CHECK-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s15 +; CHECK-NEXT: v_dual_mov_b32 v29, s13 :: v_dual_mov_b32 v28, s12 +; CHECK-NEXT: v_dual_mov_b32 v31, s9 :: v_dual_mov_b32 v30, s8 +; CHECK-NEXT: v_dual_mov_b32 v33, s5 :: v_dual_mov_b32 v32, s4 +; CHECK-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 +; CHECK-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s7 +; CHECK-NEXT: v_dual_mov_b32 v16, s0 :: v_dual_mov_b32 v17, s1 +; CHECK-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v19, s3 +; CHECK-NEXT: flat_store_b128 v[26:27], v[0:3] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[28:29], v[4:7] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[30:31], v[8:11] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[32:33], v[12:15] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[34:35], v[16:19] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[22:23] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[26:27] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[24:25] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[30:31] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[28:29] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[34:35] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[32:33] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt 0x0 +; CHECK-NEXT: s_endpgm +bb: + %alloca = alloca <4 x i64>, align 32, addrspace(5) + %alloca1 = alloca <16 x i64>, align 128, addrspace(5) + store volatile <4 x i64> 
%val4, ptr addrspace(5) %alloca + %ascast = addrspacecast ptr addrspace(5) %alloca1 to ptr + store volatile <16 x i64> %val16, ptr %ascast + %load = load volatile <16 x i64>, ptr %ascast + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll index 5b0d2d2..42401af 100644 --- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll +++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll @@ -3238,11 +3238,8 @@ define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) { ; GFX90A-GISEL-LABEL: fadd_fadd_fsub_0: ; GFX90A-GISEL: ; %bb.0: ; %bb ; GFX90A-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX90A-GISEL-NEXT: s_mov_b32 s2, 0 -; GFX90A-GISEL-NEXT: s_mov_b32 s3, s2 -; GFX90A-GISEL-NEXT: v_pk_mov_b32 v[0:1], s[2:3], s[2:3] op_sel:[0,1] ; GFX90A-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GFX90A-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], v[0:1] +; GFX90A-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], 0 ; GFX90A-GISEL-NEXT: v_mov_b32_e32 v0, v1 ; GFX90A-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0 ; GFX90A-GISEL-NEXT: v_mov_b32_e32 v2, s0 @@ -3253,11 +3250,8 @@ define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) { ; GFX942-GISEL-LABEL: fadd_fadd_fsub_0: ; GFX942-GISEL: ; %bb.0: ; %bb ; GFX942-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; GFX942-GISEL-NEXT: s_mov_b32 s2, 0 -; GFX942-GISEL-NEXT: s_mov_b32 s3, s2 -; GFX942-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GFX942-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], v[0:1] +; GFX942-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], 0 ; GFX942-GISEL-NEXT: s_nop 0 ; GFX942-GISEL-NEXT: v_mov_b32_e32 v0, v1 ; GFX942-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0 diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll index 8fe68ba..f0c8fed 100644 --- a/llvm/test/CodeGen/AMDGPU/rem_i128.ll +++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll @@ -533,8 +533,9 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: v_and_b32_e64 v6, 1, v6 ; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[8:9], v6, 1 ; GFX9-O0-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9] -; GFX9-O0-NEXT: s_mov_b64 s[4:5], -1 -; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5] +; GFX9-O0-NEXT: s_mov_b64 s[14:15], -1 +; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[8:9] +; GFX9-O0-NEXT: s_xor_b64 s[4:5], s[4:5], s[14:15] ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v5 ; GFX9-O0-NEXT: s_mov_b32 s14, s13 ; GFX9-O0-NEXT: v_xor_b32_e64 v6, v6, s14 @@ -1930,19 +1931,16 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6 ; GFX9-O0-NEXT: v_cmp_ne_u64_e64 s[6:7], v[4:5], s[6:7] -; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[8:9] ; GFX9-O0-NEXT: v_mov_b32_e32 v4, s11 -; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v1, v4, s[12:13] -; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[8:9] +; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v1, v4, s[8:9] ; GFX9-O0-NEXT: v_mov_b32_e32 v1, s10 -; GFX9-O0-NEXT: v_cndmask_b32_e64 v0, v0, v1, s[12:13] +; GFX9-O0-NEXT: v_cndmask_b32_e64 v0, v0, v1, s[8:9] ; GFX9-O0-NEXT: ; implicit-def: $sgpr12 ; GFX9-O0-NEXT: ; implicit-def: $sgpr12 ; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec ; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4 -; GFX9-O0-NEXT: s_mov_b64 s[12:13], s[8:9] ; GFX9-O0-NEXT: v_mov_b32_e32 v4, s11 -; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v3, v4, s[12:13] +; GFX9-O0-NEXT: v_cndmask_b32_e64 v4, v3, v4, s[8:9] ; GFX9-O0-NEXT: v_mov_b32_e32 v3, s10 ; GFX9-O0-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[8:9] ; 
GFX9-O0-NEXT: ; implicit-def: $sgpr8 diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll index 735720a..725d57d 100644 --- a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll +++ b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll @@ -285,7 +285,7 @@ define amdgpu_ps void @flat_store_b32_idxprom(ptr align 4 inreg %p, i32 %idx) { ; GCN-LABEL: flat_store_b32_idxprom: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: v_mov_b32_e32 v1, 1.0 -; GCN-NEXT: flat_store_b32 v0, v1, s[0:1] scale_offset +; GCN-NEXT: flat_store_b32 v0, v1, s[0:1] scale_offset scope:SCOPE_SE ; GCN-NEXT: s_endpgm entry: %idxprom = sext i32 %idx to i64 @@ -298,7 +298,7 @@ define amdgpu_ps void @flat_store_b16_idxprom(ptr align 2 inreg %p, i32 %idx) { ; GCN-LABEL: flat_store_b16_idxprom: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: v_mov_b32_e32 v1, 1 -; GCN-NEXT: flat_store_b16 v0, v1, s[0:1] scale_offset +; GCN-NEXT: flat_store_b16 v0, v1, s[0:1] scale_offset scope:SCOPE_SE ; GCN-NEXT: s_endpgm entry: %idxprom = sext i32 %idx to i64 @@ -311,7 +311,7 @@ define amdgpu_ps void @flat_store_b64_idxprom(ptr align 4 inreg %p, i32 %idx) { ; GCN-LABEL: flat_store_b64_idxprom: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: v_mov_b64_e32 v[2:3], 1.0 -; GCN-NEXT: flat_store_b64 v0, v[2:3], s[0:1] scale_offset +; GCN-NEXT: flat_store_b64 v0, v[2:3], s[0:1] scale_offset scope:SCOPE_SE ; GCN-NEXT: s_endpgm entry: %idxprom = sext i32 %idx to i64 @@ -337,12 +337,15 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; SDAG-LABEL: flat_atomicrmw_b64_rtn_idxprom: ; SDAG: ; %bb.0: ; %entry ; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 3, s[0:1] -; SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; SDAG-NEXT: s_mov_b32 s0, exec_lo +; SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; SDAG-NEXT: s_cbranch_execnz .LBB21_3 ; SDAG-NEXT: ; %bb.1: ; %Flow @@ -360,13 +363,16 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; SDAG-NEXT: s_cbranch_execz .LBB21_2 ; SDAG-NEXT: .LBB21_4: ; %atomicrmw.private +; SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo ; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; SDAG-NEXT: s_wait_loadcnt 0x0 ; SDAG-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] -; SDAG-NEXT: scratch_store_b64 v4, v[2:3], off +; SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; SDAG-NEXT: s_wait_xcnt 0x0 ; SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; SDAG-NEXT: s_branch .LBB21_5 @@ -374,19 +380,21 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; ; GISEL-LABEL: flat_atomicrmw_b64_rtn_idxprom: ; GISEL: ; %bb.0: ; %entry +; 
GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GISEL-NEXT: v_mov_b32_e32 v2, v0 ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1] -; GISEL-NEXT: s_mov_b64 s[2:3], src_private_base -; GISEL-NEXT: s_mov_b32 s2, exec_lo ; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GISEL-NEXT: v_lshlrev_b64_e32 v[0:1], 3, v[2:3] ; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v4, v0 ; GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v5, v1, vcc_lo +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GISEL-NEXT: v_xor_b32_e32 v0, s2, v5 +; GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GISEL-NEXT: v_cmpx_ne_u32_e64 s3, v5 +; GISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo +; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GISEL-NEXT: s_xor_b32 s2, exec_lo, s2 ; GISEL-NEXT: s_cbranch_execnz .LBB21_3 ; GISEL-NEXT: ; %bb.1: ; %Flow @@ -398,19 +406,22 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; GISEL-NEXT: s_branch .LBB21_5 ; GISEL-NEXT: .LBB21_3: ; %atomicrmw.global ; GISEL-NEXT: v_mov_b64_e32 v[0:1], 1 -; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GISEL-NEXT: ; implicit-def: $vgpr4 ; GISEL-NEXT: flat_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] scale_offset th:TH_ATOMIC_RETURN scope:SCOPE_SYS ; GISEL-NEXT: s_wait_xcnt 0x0 ; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2 ; GISEL-NEXT: s_cbranch_execz .LBB21_2 ; GISEL-NEXT: .LBB21_4: ; %atomicrmw.private +; GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GISEL-NEXT: s_wait_loadcnt 0x0 ; GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] -; GISEL-NEXT: scratch_store_b64 v4, v[2:3], off +; GISEL-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GISEL-NEXT: s_wait_xcnt 0x0 ; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GISEL-NEXT: s_branch .LBB21_5 diff --git a/llvm/test/CodeGen/AMDGPU/sdiv.ll b/llvm/test/CodeGen/AMDGPU/sdiv.ll index 676359fce..5c0f813 100644 --- a/llvm/test/CodeGen/AMDGPU/sdiv.ll +++ b/llvm/test/CodeGen/AMDGPU/sdiv.ll @@ -391,144 +391,156 @@ define amdgpu_kernel void @slow_sdiv_i32_3435(ptr addrspace(1) %out, ptr addrspa define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GCN-LABEL: sdiv_v2i32: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 -; GCN-NEXT: s_mov_b32 s10, s6 -; GCN-NEXT: s_mov_b32 s11, s7 +; GCN-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: s_mov_b32 s10, s2 +; GCN-NEXT: s_mov_b32 s11, s3 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s8, s2 -; GCN-NEXT: s_mov_b32 s9, s3 +; GCN-NEXT: s_mov_b32 s8, s6 +; GCN-NEXT: s_mov_b32 s9, s7 ; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_sub_i32_e32 v6, vcc, 0, v2 -; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v3 -; GCN-NEXT: v_xor_b32_e32 v4, v0, v2 -; 
GCN-NEXT: v_xor_b32_e32 v7, v1, v3 -; GCN-NEXT: v_max_i32_e32 v2, v2, v6 -; GCN-NEXT: v_max_i32_e32 v3, v3, v9 -; GCN-NEXT: v_cvt_f32_u32_e32 v6, v2 -; GCN-NEXT: v_cvt_f32_u32_e32 v9, v3 -; GCN-NEXT: v_sub_i32_e32 v5, vcc, 0, v0 -; GCN-NEXT: v_rcp_iflag_f32_e32 v6, v6 -; GCN-NEXT: v_max_i32_e32 v0, v0, v5 -; GCN-NEXT: v_rcp_iflag_f32_e32 v5, v9 -; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v2 -; GCN-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6 -; GCN-NEXT: v_mul_f32_e32 v5, 0x4f7ffffe, v5 -; GCN-NEXT: v_cvt_u32_f32_e32 v6, v6 -; GCN-NEXT: v_cvt_u32_f32_e32 v5, v5 -; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v3 -; GCN-NEXT: v_mul_lo_u32 v9, v9, v6 -; GCN-NEXT: v_mul_lo_u32 v10, v10, v5 -; GCN-NEXT: v_sub_i32_e32 v8, vcc, 0, v1 -; GCN-NEXT: v_mul_hi_u32 v9, v6, v9 -; GCN-NEXT: v_max_i32_e32 v1, v1, v8 -; GCN-NEXT: v_mul_hi_u32 v8, v5, v10 -; GCN-NEXT: v_ashrrev_i32_e32 v4, 31, v4 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v6, v9 -; GCN-NEXT: v_add_i32_e32 v5, vcc, v5, v8 -; GCN-NEXT: v_mul_hi_u32 v6, v0, v6 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v5 -; GCN-NEXT: v_ashrrev_i32_e32 v7, 31, v7 -; GCN-NEXT: v_mul_lo_u32 v8, v6, v2 -; GCN-NEXT: v_mul_lo_u32 v10, v5, v3 -; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v6 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v8 -; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v10 -; GCN-NEXT: v_add_i32_e32 v11, vcc, 1, v5 -; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v2 -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v1, v3 -; GCN-NEXT: v_sub_i32_e32 v8, vcc, v0, v2 -; GCN-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[0:1] -; GCN-NEXT: v_sub_i32_e32 v9, vcc, v1, v3 -; GCN-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[2:3] -; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[0:1] -; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v6 -; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[2:3] -; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v5 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; GCN-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3 -; GCN-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc -; GCN-NEXT: v_xor_b32_e32 v0, v0, v4 -; GCN-NEXT: v_xor_b32_e32 v1, v1, v7 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4 -; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v7 -; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: v_readfirstlane_b32 s0, v2 +; GCN-NEXT: s_abs_i32 s1, s0 +; GCN-NEXT: v_cvt_f32_u32_e32 v2, s1 +; GCN-NEXT: s_sub_i32 s6, 0, s1 +; GCN-NEXT: v_readfirstlane_b32 s8, v3 +; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; GCN-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 +; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2 +; GCN-NEXT: v_mul_lo_u32 v4, s6, v2 +; GCN-NEXT: v_readfirstlane_b32 s6, v0 +; GCN-NEXT: s_abs_i32 s7, s6 +; GCN-NEXT: s_xor_b32 s0, s6, s0 +; GCN-NEXT: v_mul_hi_u32 v4, v2, v4 +; GCN-NEXT: s_ashr_i32 s6, s0, 31 +; GCN-NEXT: v_add_i32_e32 v0, vcc, v2, v4 +; GCN-NEXT: v_mul_hi_u32 v0, s7, v0 +; GCN-NEXT: v_readfirstlane_b32 s0, v0 +; GCN-NEXT: s_mul_i32 s0, s0, s1 +; GCN-NEXT: s_sub_i32 s0, s7, s0 +; GCN-NEXT: s_sub_i32 s7, s0, s1 +; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v0 +; GCN-NEXT: s_cmp_ge_u32 s0, s1 +; GCN-NEXT: s_cselect_b64 vcc, -1, 0 +; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GCN-NEXT: s_cselect_b32 s0, s7, s0 +; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v0 +; GCN-NEXT: s_cmp_ge_u32 s0, s1 +; GCN-NEXT: s_cselect_b64 vcc, -1, 0 +; GCN-NEXT: s_abs_i32 s7, s8 +; GCN-NEXT: v_cvt_f32_u32_e32 v3, s7 +; GCN-NEXT: s_mov_b32 s0, s4 +; GCN-NEXT: s_sub_i32 s4, 0, s7 +; GCN-NEXT: s_mov_b32 s1, s5 +; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v3 +; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GCN-NEXT: v_xor_b32_e32 v0, s6, v0 +; GCN-NEXT: v_subrev_i32_e32 v0, 
vcc, s6, v0 +; GCN-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3 +; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3 +; GCN-NEXT: v_mul_lo_u32 v4, s4, v3 +; GCN-NEXT: v_readfirstlane_b32 s4, v1 +; GCN-NEXT: s_xor_b32 s5, s4, s8 +; GCN-NEXT: s_abs_i32 s4, s4 +; GCN-NEXT: v_mul_hi_u32 v1, v3, v4 +; GCN-NEXT: s_ashr_i32 s5, s5, 31 +; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1 +; GCN-NEXT: v_mul_hi_u32 v1, s4, v1 +; GCN-NEXT: v_readfirstlane_b32 s6, v1 +; GCN-NEXT: s_mul_i32 s6, s6, s7 +; GCN-NEXT: s_sub_i32 s4, s4, s6 +; GCN-NEXT: s_sub_i32 s6, s4, s7 +; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v1 +; GCN-NEXT: s_cmp_ge_u32 s4, s7 +; GCN-NEXT: s_cselect_b64 vcc, -1, 0 +; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GCN-NEXT: s_cselect_b32 s4, s6, s4 +; GCN-NEXT: v_add_i32_e32 v2, vcc, 1, v1 +; GCN-NEXT: s_cmp_ge_u32 s4, s7 +; GCN-NEXT: s_cselect_b64 vcc, -1, 0 +; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GCN-NEXT: v_xor_b32_e32 v1, s5, v1 +; GCN-NEXT: v_subrev_i32_e32 v1, vcc, s5, v1 +; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; TONGA-LABEL: sdiv_v2i32: ; TONGA: ; %bb.0: -; TONGA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; TONGA-NEXT: s_mov_b32 s7, 0xf000 -; TONGA-NEXT: s_mov_b32 s6, -1 -; TONGA-NEXT: s_mov_b32 s10, s6 -; TONGA-NEXT: s_mov_b32 s11, s7 +; TONGA-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24 +; TONGA-NEXT: s_mov_b32 s3, 0xf000 +; TONGA-NEXT: s_mov_b32 s2, -1 +; TONGA-NEXT: s_mov_b32 s10, s2 +; TONGA-NEXT: s_mov_b32 s11, s3 ; TONGA-NEXT: s_waitcnt lgkmcnt(0) -; TONGA-NEXT: s_mov_b32 s8, s2 -; TONGA-NEXT: s_mov_b32 s9, s3 +; TONGA-NEXT: s_mov_b32 s8, s6 +; TONGA-NEXT: s_mov_b32 s9, s7 ; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 -; TONGA-NEXT: s_mov_b32 s4, s0 -; TONGA-NEXT: s_mov_b32 s5, s1 ; TONGA-NEXT: s_waitcnt vmcnt(0) -; TONGA-NEXT: v_sub_u32_e32 v6, vcc, 0, v2 -; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v3 -; TONGA-NEXT: v_xor_b32_e32 v4, v0, v2 -; TONGA-NEXT: v_xor_b32_e32 v7, v1, v3 -; TONGA-NEXT: v_max_i32_e32 v2, v2, v6 -; TONGA-NEXT: v_max_i32_e32 v3, v3, v9 -; TONGA-NEXT: v_cvt_f32_u32_e32 v6, v2 -; TONGA-NEXT: v_cvt_f32_u32_e32 v9, v3 -; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0 -; TONGA-NEXT: v_rcp_iflag_f32_e32 v6, v6 -; TONGA-NEXT: v_max_i32_e32 v0, v0, v5 -; TONGA-NEXT: v_rcp_iflag_f32_e32 v5, v9 -; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v2 -; TONGA-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6 -; TONGA-NEXT: v_mul_f32_e32 v5, 0x4f7ffffe, v5 -; TONGA-NEXT: v_cvt_u32_f32_e32 v6, v6 -; TONGA-NEXT: v_cvt_u32_f32_e32 v5, v5 -; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v3 -; TONGA-NEXT: v_mul_lo_u32 v9, v9, v6 -; TONGA-NEXT: v_mul_lo_u32 v10, v10, v5 -; TONGA-NEXT: v_sub_u32_e32 v8, vcc, 0, v1 -; TONGA-NEXT: v_mul_hi_u32 v9, v6, v9 -; TONGA-NEXT: v_max_i32_e32 v1, v1, v8 -; TONGA-NEXT: v_mul_hi_u32 v8, v5, v10 -; TONGA-NEXT: v_ashrrev_i32_e32 v4, 31, v4 -; TONGA-NEXT: v_add_u32_e32 v6, vcc, v6, v9 -; TONGA-NEXT: v_add_u32_e32 v5, vcc, v5, v8 -; TONGA-NEXT: v_mul_hi_u32 v6, v0, v6 -; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5 -; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v7 -; TONGA-NEXT: v_mul_lo_u32 v8, v6, v2 -; TONGA-NEXT: v_mul_lo_u32 v10, v5, v3 -; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v6 -; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v8 -; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v10 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, 1, v5 -; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v2 -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v1, v3 -; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v0, v2 -; TONGA-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[0:1] -; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v1, v3 -; 
TONGA-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[2:3] -; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[0:1] -; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v6 -; TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[2:3] -; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v5 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; TONGA-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3 -; TONGA-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc -; TONGA-NEXT: v_xor_b32_e32 v0, v0, v4 -; TONGA-NEXT: v_xor_b32_e32 v1, v1, v7 -; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v4 -; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v7 -; TONGA-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; TONGA-NEXT: v_readfirstlane_b32 s0, v2 +; TONGA-NEXT: s_abs_i32 s1, s0 +; TONGA-NEXT: v_cvt_f32_u32_e32 v2, s1 +; TONGA-NEXT: s_sub_i32 s6, 0, s1 +; TONGA-NEXT: v_readfirstlane_b32 s8, v3 +; TONGA-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; TONGA-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 +; TONGA-NEXT: v_cvt_u32_f32_e32 v2, v2 +; TONGA-NEXT: v_mul_lo_u32 v4, s6, v2 +; TONGA-NEXT: v_readfirstlane_b32 s6, v0 +; TONGA-NEXT: s_abs_i32 s7, s6 +; TONGA-NEXT: s_xor_b32 s0, s6, s0 +; TONGA-NEXT: v_mul_hi_u32 v4, v2, v4 +; TONGA-NEXT: s_ashr_i32 s6, s0, 31 +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v2, v4 +; TONGA-NEXT: v_mul_hi_u32 v0, s7, v0 +; TONGA-NEXT: v_readfirstlane_b32 s0, v0 +; TONGA-NEXT: s_mul_i32 s0, s0, s1 +; TONGA-NEXT: s_sub_i32 s0, s7, s0 +; TONGA-NEXT: s_sub_i32 s7, s0, s1 +; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; TONGA-NEXT: s_cmp_ge_u32 s0, s1 +; TONGA-NEXT: s_cselect_b64 vcc, -1, 0 +; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; TONGA-NEXT: s_cselect_b32 s0, s7, s0 +; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; TONGA-NEXT: s_cmp_ge_u32 s0, s1 +; TONGA-NEXT: s_cselect_b64 vcc, -1, 0 +; TONGA-NEXT: s_abs_i32 s7, s8 +; TONGA-NEXT: v_cvt_f32_u32_e32 v3, s7 +; TONGA-NEXT: s_mov_b32 s0, s4 +; TONGA-NEXT: s_sub_i32 s4, 0, s7 +; TONGA-NEXT: s_mov_b32 s1, s5 +; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3 +; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; TONGA-NEXT: v_xor_b32_e32 v0, s6, v0 +; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, s6, v0 +; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3 +; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3 +; TONGA-NEXT: v_mul_lo_u32 v4, s4, v3 +; TONGA-NEXT: v_readfirstlane_b32 s4, v1 +; TONGA-NEXT: s_xor_b32 s5, s4, s8 +; TONGA-NEXT: s_abs_i32 s4, s4 +; TONGA-NEXT: v_mul_hi_u32 v1, v3, v4 +; TONGA-NEXT: s_ashr_i32 s5, s5, 31 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v3, v1 +; TONGA-NEXT: v_mul_hi_u32 v1, s4, v1 +; TONGA-NEXT: v_readfirstlane_b32 s6, v1 +; TONGA-NEXT: s_mul_i32 s6, s6, s7 +; TONGA-NEXT: s_sub_i32 s4, s4, s6 +; TONGA-NEXT: s_sub_i32 s6, s4, s7 +; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v1 +; TONGA-NEXT: s_cmp_ge_u32 s4, s7 +; TONGA-NEXT: s_cselect_b64 vcc, -1, 0 +; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; TONGA-NEXT: s_cselect_b32 s4, s6, s4 +; TONGA-NEXT: v_add_u32_e32 v2, vcc, 1, v1 +; TONGA-NEXT: s_cmp_ge_u32 s4, s7 +; TONGA-NEXT: s_cselect_b64 vcc, -1, 0 +; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; TONGA-NEXT: v_xor_b32_e32 v1, s5, v1 +; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s5, v1 +; TONGA-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; TONGA-NEXT: s_endpgm ; ; GFX9-LABEL: sdiv_v2i32: @@ -546,44 +558,44 @@ define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: v_readfirstlane_b32 s0, v2 ; GFX9-NEXT: s_abs_i32 s1, s0 ; GFX9-NEXT: v_cvt_f32_u32_e32 v2, s1 -; GFX9-NEXT: v_readfirstlane_b32 s5, v0 -; GFX9-NEXT: s_xor_b32 s0, s5, s0 +; GFX9-NEXT: 
v_readfirstlane_b32 s4, v0 +; GFX9-NEXT: s_xor_b32 s0, s4, s0 ; GFX9-NEXT: s_ashr_i32 s6, s0, 31 ; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v2 ; GFX9-NEXT: s_sub_i32 s0, 0, s1 -; GFX9-NEXT: s_abs_i32 s5, s5 -; GFX9-NEXT: v_readfirstlane_b32 s4, v3 +; GFX9-NEXT: s_abs_i32 s4, s4 +; GFX9-NEXT: v_readfirstlane_b32 s5, v3 ; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v2 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX9-NEXT: v_readfirstlane_b32 s7, v0 ; GFX9-NEXT: s_mul_i32 s0, s0, s7 ; GFX9-NEXT: s_mul_hi_u32 s0, s7, s0 ; GFX9-NEXT: s_add_i32 s7, s7, s0 -; GFX9-NEXT: s_mul_hi_u32 s0, s5, s7 +; GFX9-NEXT: s_mul_hi_u32 s0, s4, s7 ; GFX9-NEXT: s_mul_i32 s7, s0, s1 -; GFX9-NEXT: s_sub_i32 s5, s5, s7 +; GFX9-NEXT: s_sub_i32 s4, s4, s7 ; GFX9-NEXT: s_add_i32 s10, s0, 1 -; GFX9-NEXT: s_sub_i32 s7, s5, s1 -; GFX9-NEXT: s_cmp_ge_u32 s5, s1 +; GFX9-NEXT: s_sub_i32 s7, s4, s1 +; GFX9-NEXT: s_cmp_ge_u32 s4, s1 ; GFX9-NEXT: s_cselect_b32 s0, s10, s0 -; GFX9-NEXT: s_cselect_b32 s5, s7, s5 +; GFX9-NEXT: s_cselect_b32 s4, s7, s4 ; GFX9-NEXT: s_add_i32 s7, s0, 1 -; GFX9-NEXT: s_cmp_ge_u32 s5, s1 -; GFX9-NEXT: s_cselect_b32 s5, s7, s0 -; GFX9-NEXT: s_abs_i32 s7, s4 +; GFX9-NEXT: s_cmp_ge_u32 s4, s1 +; GFX9-NEXT: s_cselect_b32 s4, s7, s0 +; GFX9-NEXT: s_abs_i32 s7, s5 ; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s7 -; GFX9-NEXT: s_xor_b32 s5, s5, s6 +; GFX9-NEXT: s_xor_b32 s4, s4, s6 ; GFX9-NEXT: s_mov_b32 s1, s9 ; GFX9-NEXT: s_sub_i32 s9, 0, s7 ; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0 -; GFX9-NEXT: s_sub_i32 s5, s5, s6 +; GFX9-NEXT: s_sub_i32 s4, s4, s6 ; GFX9-NEXT: s_mov_b32 s0, s8 ; GFX9-NEXT: v_readfirstlane_b32 s8, v1 ; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX9-NEXT: s_xor_b32 s4, s8, s4 +; GFX9-NEXT: s_xor_b32 s5, s8, s5 ; GFX9-NEXT: s_abs_i32 s8, s8 -; GFX9-NEXT: s_ashr_i32 s4, s4, 31 +; GFX9-NEXT: s_ashr_i32 s5, s5, 31 ; GFX9-NEXT: v_readfirstlane_b32 s6, v0 ; GFX9-NEXT: s_mul_i32 s9, s9, s6 ; GFX9-NEXT: s_mul_hi_u32 s9, s6, s9 @@ -599,10 +611,10 @@ define amdgpu_kernel void @sdiv_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GFX9-NEXT: s_add_i32 s9, s6, 1 ; GFX9-NEXT: s_cmp_ge_u32 s8, s7 ; GFX9-NEXT: s_cselect_b32 s6, s9, s6 -; GFX9-NEXT: s_xor_b32 s6, s6, s4 -; GFX9-NEXT: s_sub_i32 s4, s6, s4 -; GFX9-NEXT: v_mov_b32_e32 v0, s5 -; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: s_xor_b32 s6, s6, s5 +; GFX9-NEXT: s_sub_i32 s5, s6, s5 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 ; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX9-NEXT: s_endpgm ; @@ -792,255 +804,255 @@ define amdgpu_kernel void @sdiv_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i ; GCN-LABEL: sdiv_v4i32: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 -; GCN-NEXT: s_mov_b32 s10, s6 -; GCN-NEXT: s_mov_b32 s11, s7 +; GCN-NEXT: s_mov_b32 s11, 0xf000 +; GCN-NEXT: s_mov_b32 s10, -1 +; GCN-NEXT: s_mov_b32 s6, s10 +; GCN-NEXT: s_mov_b32 s7, s11 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s8, s2 -; GCN-NEXT: s_mov_b32 s9, s3 -; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 -; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0 offset:16 +; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s8, s0 +; GCN-NEXT: s_mov_b32 s9, s1 ; GCN-NEXT: s_waitcnt vmcnt(1) -; GCN-NEXT: 
v_sub_i32_e32 v9, vcc, 0, v0 -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v4 -; GCN-NEXT: v_xor_b32_e32 v8, v0, v4 -; GCN-NEXT: v_max_i32_e32 v4, v4, v10 -; GCN-NEXT: v_cvt_f32_u32_e32 v10, v4 -; GCN-NEXT: v_sub_i32_e32 v13, vcc, 0, v5 -; GCN-NEXT: v_xor_b32_e32 v11, v1, v5 -; GCN-NEXT: v_rcp_iflag_f32_e32 v10, v10 -; GCN-NEXT: v_max_i32_e32 v5, v5, v13 -; GCN-NEXT: v_cvt_f32_u32_e32 v13, v5 -; GCN-NEXT: v_sub_i32_e32 v16, vcc, 0, v4 -; GCN-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10 -; GCN-NEXT: v_cvt_u32_f32_e32 v10, v10 -; GCN-NEXT: v_rcp_iflag_f32_e32 v13, v13 -; GCN-NEXT: v_sub_i32_e32 v12, vcc, 0, v1 -; GCN-NEXT: v_mul_lo_u32 v16, v16, v10 -; GCN-NEXT: v_mul_f32_e32 v13, 0x4f7ffffe, v13 -; GCN-NEXT: v_cvt_u32_f32_e32 v13, v13 -; GCN-NEXT: v_max_i32_e32 v0, v0, v9 -; GCN-NEXT: v_mul_hi_u32 v16, v10, v16 -; GCN-NEXT: v_max_i32_e32 v1, v1, v12 -; GCN-NEXT: v_sub_i32_e32 v15, vcc, 0, v6 -; GCN-NEXT: v_add_i32_e32 v10, vcc, v10, v16 -; GCN-NEXT: v_sub_i32_e32 v16, vcc, 0, v5 -; GCN-NEXT: v_mul_lo_u32 v16, v16, v13 -; GCN-NEXT: v_mul_hi_u32 v10, v0, v10 -; GCN-NEXT: v_xor_b32_e32 v14, v2, v6 -; GCN-NEXT: v_max_i32_e32 v6, v6, v15 -; GCN-NEXT: v_mul_hi_u32 v12, v13, v16 -; GCN-NEXT: v_cvt_f32_u32_e32 v15, v6 -; GCN-NEXT: v_ashrrev_i32_e32 v8, 31, v8 -; GCN-NEXT: v_ashrrev_i32_e32 v11, 31, v11 -; GCN-NEXT: v_add_i32_e32 v12, vcc, v13, v12 -; GCN-NEXT: v_mul_lo_u32 v13, v10, v4 -; GCN-NEXT: v_mul_hi_u32 v12, v1, v12 -; GCN-NEXT: v_rcp_iflag_f32_e32 v9, v15 -; GCN-NEXT: v_ashrrev_i32_e32 v14, 31, v14 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v13 -; GCN-NEXT: v_add_i32_e32 v13, vcc, 1, v10 -; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4 -; GCN-NEXT: v_cndmask_b32_e64 v10, v10, v13, s[0:1] -; GCN-NEXT: v_sub_i32_e32 v13, vcc, v0, v4 -; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v13, s[0:1] -; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4 -; GCN-NEXT: v_mul_lo_u32 v0, v12, v5 -; GCN-NEXT: v_mul_f32_e32 v9, 0x4f7ffffe, v9 -; GCN-NEXT: v_cvt_u32_f32_e32 v9, v9 -; GCN-NEXT: v_sub_i32_e32 v4, vcc, 0, v6 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, v1, v0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, 1, v12 -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5 -; GCN-NEXT: v_cndmask_b32_e64 v1, v12, v1, s[2:3] -; GCN-NEXT: v_sub_i32_e32 v12, vcc, v0, v5 -; GCN-NEXT: v_mul_lo_u32 v4, v4, v9 -; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[2:3] -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, 0, v7 -; GCN-NEXT: v_max_i32_e32 v5, v7, v0 -; GCN-NEXT: v_cvt_f32_u32_e32 v0, v5 -; GCN-NEXT: v_mul_hi_u32 v4, v9, v4 -; GCN-NEXT: v_add_i32_e32 v13, vcc, 1, v10 +; GCN-NEXT: v_readfirstlane_b32 s0, v0 +; GCN-NEXT: v_readfirstlane_b32 s1, v1 +; GCN-NEXT: v_readfirstlane_b32 s2, v2 +; GCN-NEXT: s_abs_i32 s13, s0 +; GCN-NEXT: s_abs_i32 s14, s1 +; GCN-NEXT: s_abs_i32 s15, s2 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s13 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s14 +; GCN-NEXT: v_cvt_f32_u32_e32 v2, s15 +; GCN-NEXT: v_readfirstlane_b32 s6, v3 ; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0 -; GCN-NEXT: v_add_i32_e32 v4, vcc, v9, v4 -; GCN-NEXT: v_sub_i32_e32 v9, vcc, 0, v2 -; GCN-NEXT: v_max_i32_e32 v2, v2, v9 -; GCN-NEXT: v_mul_hi_u32 v4, v2, v4 +; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1 +; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; GCN-NEXT: s_abs_i32 s17, s6 +; GCN-NEXT: v_cvt_f32_u32_e32 v3, s17 ; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v9, v0 -; GCN-NEXT: v_cndmask_b32_e64 v0, v10, v13, s[0:1] -; GCN-NEXT: v_xor_b32_e32 v0, v0, v8 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v8 -; GCN-NEXT: v_mul_lo_u32 
v8, v4, v6 -; GCN-NEXT: v_add_i32_e32 v12, vcc, 1, v1 -; GCN-NEXT: v_sub_i32_e32 v10, vcc, 0, v5 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, v2, v8 -; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v12, s[2:3] -; GCN-NEXT: v_mul_lo_u32 v10, v10, v9 -; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-NEXT: v_cmp_ge_u32_e64 s[0:1], v2, v6 -; GCN-NEXT: v_xor_b32_e32 v1, v1, v11 -; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1] -; GCN-NEXT: v_sub_i32_e32 v8, vcc, v2, v6 -; GCN-NEXT: v_sub_i32_e32 v1, vcc, v1, v11 -; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1] -; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v4 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6 -; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v8, vcc -; GCN-NEXT: v_mul_hi_u32 v4, v9, v10 -; GCN-NEXT: v_sub_i32_e32 v6, vcc, 0, v3 -; GCN-NEXT: v_max_i32_e32 v6, v3, v6 -; GCN-NEXT: v_add_i32_e32 v4, vcc, v9, v4 -; GCN-NEXT: v_mul_hi_u32 v4, v6, v4 -; GCN-NEXT: v_xor_b32_e32 v2, v2, v14 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, v2, v14 -; GCN-NEXT: v_mul_lo_u32 v8, v4, v5 -; GCN-NEXT: v_xor_b32_e32 v3, v3, v7 -; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v4 -; GCN-NEXT: v_sub_i32_e32 v6, vcc, v6, v8 -; GCN-NEXT: v_sub_i32_e32 v8, vcc, v6, v5 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5 -; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc -; GCN-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc -; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v4 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5 -; GCN-NEXT: v_ashrrev_i32_e32 v3, 31, v3 -; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc -; GCN-NEXT: v_xor_b32_e32 v4, v4, v3 -; GCN-NEXT: v_sub_i32_e32 v3, vcc, v4, v3 -; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 +; GCN-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_readfirstlane_b32 s3, v4 +; GCN-NEXT: v_readfirstlane_b32 s4, v5 +; GCN-NEXT: v_readfirstlane_b32 s5, v6 +; GCN-NEXT: s_xor_b32 s12, s3, s0 +; GCN-NEXT: s_xor_b32 s0, s4, s1 +; GCN-NEXT: s_xor_b32 s1, s5, s2 +; GCN-NEXT: s_sub_i32 s2, 0, s13 +; GCN-NEXT: s_ashr_i32 s18, s0, 31 +; GCN-NEXT: s_sub_i32 s0, 0, s14 +; GCN-NEXT: s_ashr_i32 s19, s1, 31 +; GCN-NEXT: s_sub_i32 s1, 0, s15 +; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v3 +; GCN-NEXT: v_mul_lo_u32 v4, s2, v0 +; GCN-NEXT: v_mul_lo_u32 v5, s0, v1 +; GCN-NEXT: v_mul_lo_u32 v6, s1, v2 +; GCN-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3 +; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3 +; GCN-NEXT: v_mul_hi_u32 v4, v0, v4 +; GCN-NEXT: v_mul_hi_u32 v5, v1, v5 +; GCN-NEXT: v_mul_hi_u32 v6, v2, v6 +; GCN-NEXT: s_sub_i32 s20, 0, s17 +; GCN-NEXT: v_readfirstlane_b32 s7, v7 +; GCN-NEXT: s_abs_i32 s3, s3 +; GCN-NEXT: s_abs_i32 s4, s4 +; GCN-NEXT: s_abs_i32 s5, s5 +; GCN-NEXT: v_mul_lo_u32 v7, s20, v3 +; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v4 +; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v5 +; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v6 +; GCN-NEXT: v_mul_hi_u32 v0, s3, v0 +; GCN-NEXT: v_mul_hi_u32 v1, s4, v1 +; GCN-NEXT: v_mul_hi_u32 v2, s5, v2 +; GCN-NEXT: v_mul_hi_u32 v7, v3, v7 +; GCN-NEXT: v_mul_lo_u32 v4, v0, s13 +; GCN-NEXT: v_mul_lo_u32 v6, v1, s14 +; GCN-NEXT: v_mul_lo_u32 v8, v2, s15 +; GCN-NEXT: s_abs_i32 s16, s7 +; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v7 +; GCN-NEXT: v_mul_hi_u32 v3, s16, v3 +; GCN-NEXT: v_sub_i32_e32 v4, vcc, s3, v4 +; GCN-NEXT: v_sub_i32_e32 v6, vcc, s4, v6 +; GCN-NEXT: v_sub_i32_e32 v8, vcc, s5, v8 +; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v0 +; GCN-NEXT: v_add_i32_e32 v7, vcc, 1, v1 +; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v2 +; 
GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v4 +; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s14, v6 +; GCN-NEXT: v_cmp_le_u32_e64 s[4:5], s15, v8 +; GCN-NEXT: v_subrev_i32_e32 v10, vcc, s13, v4 +; GCN-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[0:1] +; GCN-NEXT: v_subrev_i32_e32 v5, vcc, s14, v6 +; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[2:3] +; GCN-NEXT: v_subrev_i32_e32 v7, vcc, s15, v8 +; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[4:5] +; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1] +; GCN-NEXT: v_add_i32_e32 v9, vcc, 1, v0 +; GCN-NEXT: v_cndmask_b32_e64 v5, v6, v5, s[2:3] +; GCN-NEXT: v_add_i32_e32 v6, vcc, 1, v1 +; GCN-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[4:5] +; GCN-NEXT: v_add_i32_e32 v8, vcc, 1, v2 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s13, v4 +; GCN-NEXT: v_mul_lo_u32 v4, v3, s17 +; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s14, v5 +; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s15, v7 +; GCN-NEXT: s_ashr_i32 s12, s12, 31 +; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; GCN-NEXT: v_xor_b32_e32 v0, s12, v0 +; GCN-NEXT: v_xor_b32_e32 v1, s18, v1 +; GCN-NEXT: v_xor_b32_e32 v2, s19, v2 +; GCN-NEXT: v_sub_i32_e32 v4, vcc, s16, v4 +; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s12, v0 +; GCN-NEXT: v_subrev_i32_e32 v1, vcc, s18, v1 +; GCN-NEXT: v_subrev_i32_e32 v2, vcc, s19, v2 +; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v3 +; GCN-NEXT: v_subrev_i32_e32 v6, vcc, s17, v4 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s17, v4 +; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc +; GCN-NEXT: v_add_i32_e32 v5, vcc, 1, v3 +; GCN-NEXT: s_xor_b32 s0, s7, s6 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s17, v4 +; GCN-NEXT: s_ashr_i32 s0, s0, 31 +; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; GCN-NEXT: v_xor_b32_e32 v3, s0, v3 +; GCN-NEXT: v_subrev_i32_e32 v3, vcc, s0, v3 +; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 ; GCN-NEXT: s_endpgm ; ; TONGA-LABEL: sdiv_v4i32: ; TONGA: ; %bb.0: ; TONGA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; TONGA-NEXT: s_mov_b32 s7, 0xf000 -; TONGA-NEXT: s_mov_b32 s6, -1 -; TONGA-NEXT: s_mov_b32 s10, s6 -; TONGA-NEXT: s_mov_b32 s11, s7 +; TONGA-NEXT: s_mov_b32 s11, 0xf000 +; TONGA-NEXT: s_mov_b32 s10, -1 +; TONGA-NEXT: s_mov_b32 s6, s10 +; TONGA-NEXT: s_mov_b32 s7, s11 ; TONGA-NEXT: s_waitcnt lgkmcnt(0) -; TONGA-NEXT: s_mov_b32 s8, s2 -; TONGA-NEXT: s_mov_b32 s9, s3 -; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 -; TONGA-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16 -; TONGA-NEXT: s_mov_b32 s4, s0 -; TONGA-NEXT: s_mov_b32 s5, s1 +; TONGA-NEXT: s_mov_b32 s4, s2 +; TONGA-NEXT: s_mov_b32 s5, s3 +; TONGA-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0 offset:16 +; TONGA-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0 +; TONGA-NEXT: s_mov_b32 s8, s0 +; TONGA-NEXT: s_mov_b32 s9, s1 ; TONGA-NEXT: s_waitcnt vmcnt(1) -; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v0 -; TONGA-NEXT: s_waitcnt vmcnt(0) -; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v4 -; TONGA-NEXT: v_xor_b32_e32 v8, v0, v4 -; TONGA-NEXT: v_max_i32_e32 v4, v4, v10 -; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v4 -; TONGA-NEXT: v_sub_u32_e32 v13, vcc, 0, v5 -; TONGA-NEXT: v_xor_b32_e32 v11, v1, v5 -; TONGA-NEXT: v_rcp_iflag_f32_e32 v10, v10 -; TONGA-NEXT: v_max_i32_e32 v5, v5, v13 -; TONGA-NEXT: v_cvt_f32_u32_e32 v13, v5 -; TONGA-NEXT: v_sub_u32_e32 v16, vcc, 0, v4 -; TONGA-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10 -; TONGA-NEXT: v_cvt_u32_f32_e32 v10, v10 -; TONGA-NEXT: v_rcp_iflag_f32_e32 v13, v13 -; TONGA-NEXT: v_sub_u32_e32 
v12, vcc, 0, v1 -; TONGA-NEXT: v_mul_lo_u32 v16, v16, v10 -; TONGA-NEXT: v_mul_f32_e32 v13, 0x4f7ffffe, v13 -; TONGA-NEXT: v_cvt_u32_f32_e32 v13, v13 -; TONGA-NEXT: v_max_i32_e32 v0, v0, v9 -; TONGA-NEXT: v_mul_hi_u32 v16, v10, v16 -; TONGA-NEXT: v_max_i32_e32 v1, v1, v12 -; TONGA-NEXT: v_sub_u32_e32 v15, vcc, 0, v6 -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v10, v16 -; TONGA-NEXT: v_sub_u32_e32 v16, vcc, 0, v5 -; TONGA-NEXT: v_mul_lo_u32 v16, v16, v13 -; TONGA-NEXT: v_mul_hi_u32 v10, v0, v10 -; TONGA-NEXT: v_xor_b32_e32 v14, v2, v6 -; TONGA-NEXT: v_max_i32_e32 v6, v6, v15 -; TONGA-NEXT: v_mul_hi_u32 v12, v13, v16 -; TONGA-NEXT: v_cvt_f32_u32_e32 v15, v6 -; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v8 -; TONGA-NEXT: v_ashrrev_i32_e32 v11, 31, v11 -; TONGA-NEXT: v_add_u32_e32 v12, vcc, v13, v12 -; TONGA-NEXT: v_mul_lo_u32 v13, v10, v4 -; TONGA-NEXT: v_mul_hi_u32 v12, v1, v12 -; TONGA-NEXT: v_rcp_iflag_f32_e32 v9, v15 -; TONGA-NEXT: v_ashrrev_i32_e32 v14, 31, v14 -; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v13 -; TONGA-NEXT: v_add_u32_e32 v13, vcc, 1, v10 -; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4 -; TONGA-NEXT: v_cndmask_b32_e64 v10, v10, v13, s[0:1] -; TONGA-NEXT: v_sub_u32_e32 v13, vcc, v0, v4 -; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v13, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v0, v4 -; TONGA-NEXT: v_mul_lo_u32 v0, v12, v5 -; TONGA-NEXT: v_mul_f32_e32 v9, 0x4f7ffffe, v9 -; TONGA-NEXT: v_cvt_u32_f32_e32 v9, v9 -; TONGA-NEXT: v_sub_u32_e32 v4, vcc, 0, v6 -; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0 -; TONGA-NEXT: v_add_u32_e32 v1, vcc, 1, v12 -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5 -; TONGA-NEXT: v_cndmask_b32_e64 v1, v12, v1, s[2:3] -; TONGA-NEXT: v_sub_u32_e32 v12, vcc, v0, v5 -; TONGA-NEXT: v_mul_lo_u32 v4, v4, v9 -; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v12, s[2:3] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v0, v5 -; TONGA-NEXT: v_sub_u32_e32 v0, vcc, 0, v7 -; TONGA-NEXT: v_max_i32_e32 v5, v7, v0 -; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v5 -; TONGA-NEXT: v_mul_hi_u32 v4, v9, v4 -; TONGA-NEXT: v_add_u32_e32 v13, vcc, 1, v10 +; TONGA-NEXT: v_readfirstlane_b32 s0, v0 +; TONGA-NEXT: v_readfirstlane_b32 s1, v1 +; TONGA-NEXT: v_readfirstlane_b32 s2, v2 +; TONGA-NEXT: s_abs_i32 s13, s0 +; TONGA-NEXT: s_abs_i32 s14, s1 +; TONGA-NEXT: s_abs_i32 s15, s2 +; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s13 +; TONGA-NEXT: v_cvt_f32_u32_e32 v1, s14 +; TONGA-NEXT: v_cvt_f32_u32_e32 v2, s15 +; TONGA-NEXT: v_readfirstlane_b32 s6, v3 ; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0 -; TONGA-NEXT: v_add_u32_e32 v4, vcc, v9, v4 -; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v2 -; TONGA-NEXT: v_max_i32_e32 v2, v2, v9 -; TONGA-NEXT: v_mul_hi_u32 v4, v2, v4 +; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1 +; TONGA-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; TONGA-NEXT: s_abs_i32 s17, s6 +; TONGA-NEXT: v_cvt_f32_u32_e32 v3, s17 ; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 -; TONGA-NEXT: v_cvt_u32_f32_e32 v9, v0 -; TONGA-NEXT: v_cndmask_b32_e64 v0, v10, v13, s[0:1] -; TONGA-NEXT: v_xor_b32_e32 v0, v0, v8 -; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v8 -; TONGA-NEXT: v_mul_lo_u32 v8, v4, v6 -; TONGA-NEXT: v_add_u32_e32 v12, vcc, 1, v1 -; TONGA-NEXT: v_sub_u32_e32 v10, vcc, 0, v5 -; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v8 -; TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v12, s[2:3] -; TONGA-NEXT: v_mul_lo_u32 v10, v10, v9 -; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v4 -; TONGA-NEXT: v_cmp_ge_u32_e64 s[0:1], v2, v6 -; TONGA-NEXT: v_xor_b32_e32 v1, v1, v11 -; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1] -; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v2, v6 -; 
TONGA-NEXT: v_sub_u32_e32 v1, vcc, v1, v11 -; TONGA-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1] -; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v4 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6 -; TONGA-NEXT: v_cndmask_b32_e32 v2, v4, v8, vcc -; TONGA-NEXT: v_mul_hi_u32 v4, v9, v10 -; TONGA-NEXT: v_sub_u32_e32 v6, vcc, 0, v3 -; TONGA-NEXT: v_max_i32_e32 v6, v3, v6 -; TONGA-NEXT: v_add_u32_e32 v4, vcc, v9, v4 -; TONGA-NEXT: v_mul_hi_u32 v4, v6, v4 -; TONGA-NEXT: v_xor_b32_e32 v2, v2, v14 -; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v14 -; TONGA-NEXT: v_mul_lo_u32 v8, v4, v5 -; TONGA-NEXT: v_xor_b32_e32 v3, v3, v7 -; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v4 -; TONGA-NEXT: v_sub_u32_e32 v6, vcc, v6, v8 -; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v6, v5 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5 -; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc -; TONGA-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc -; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v4 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v6, v5 -; TONGA-NEXT: v_ashrrev_i32_e32 v3, 31, v3 -; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc -; TONGA-NEXT: v_xor_b32_e32 v4, v4, v3 -; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v4, v3 -; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 +; TONGA-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 +; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0 +; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1 +; TONGA-NEXT: v_cvt_u32_f32_e32 v2, v2 +; TONGA-NEXT: s_waitcnt vmcnt(0) +; TONGA-NEXT: v_readfirstlane_b32 s3, v4 +; TONGA-NEXT: v_readfirstlane_b32 s4, v5 +; TONGA-NEXT: v_readfirstlane_b32 s5, v6 +; TONGA-NEXT: s_xor_b32 s12, s3, s0 +; TONGA-NEXT: s_xor_b32 s0, s4, s1 +; TONGA-NEXT: s_xor_b32 s1, s5, s2 +; TONGA-NEXT: s_sub_i32 s2, 0, s13 +; TONGA-NEXT: s_ashr_i32 s18, s0, 31 +; TONGA-NEXT: s_sub_i32 s0, 0, s14 +; TONGA-NEXT: s_ashr_i32 s19, s1, 31 +; TONGA-NEXT: s_sub_i32 s1, 0, s15 +; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3 +; TONGA-NEXT: v_mul_lo_u32 v4, s2, v0 +; TONGA-NEXT: v_mul_lo_u32 v5, s0, v1 +; TONGA-NEXT: v_mul_lo_u32 v6, s1, v2 +; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3 +; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3 +; TONGA-NEXT: v_mul_hi_u32 v4, v0, v4 +; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5 +; TONGA-NEXT: v_mul_hi_u32 v6, v2, v6 +; TONGA-NEXT: s_sub_i32 s20, 0, s17 +; TONGA-NEXT: v_readfirstlane_b32 s7, v7 +; TONGA-NEXT: s_abs_i32 s3, s3 +; TONGA-NEXT: s_abs_i32 s4, s4 +; TONGA-NEXT: s_abs_i32 s5, s5 +; TONGA-NEXT: v_mul_lo_u32 v7, s20, v3 +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v4 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v5 +; TONGA-NEXT: v_add_u32_e32 v2, vcc, v2, v6 +; TONGA-NEXT: v_mul_hi_u32 v0, s3, v0 +; TONGA-NEXT: v_mul_hi_u32 v1, s4, v1 +; TONGA-NEXT: v_mul_hi_u32 v2, s5, v2 +; TONGA-NEXT: v_mul_hi_u32 v7, v3, v7 +; TONGA-NEXT: v_mul_lo_u32 v4, v0, s13 +; TONGA-NEXT: v_mul_lo_u32 v6, v1, s14 +; TONGA-NEXT: v_mul_lo_u32 v8, v2, s15 +; TONGA-NEXT: s_abs_i32 s16, s7 +; TONGA-NEXT: v_add_u32_e32 v3, vcc, v3, v7 +; TONGA-NEXT: v_mul_hi_u32 v3, s16, v3 +; TONGA-NEXT: v_sub_u32_e32 v4, vcc, s3, v4 +; TONGA-NEXT: v_sub_u32_e32 v6, vcc, s4, v6 +; TONGA-NEXT: v_sub_u32_e32 v8, vcc, s5, v8 +; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v0 +; TONGA-NEXT: v_add_u32_e32 v7, vcc, 1, v1 +; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v2 +; TONGA-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v4 +; TONGA-NEXT: v_cmp_le_u32_e64 s[2:3], s14, v6 +; TONGA-NEXT: v_cmp_le_u32_e64 s[4:5], s15, v8 +; TONGA-NEXT: v_subrev_u32_e32 v10, vcc, s13, v4 +; TONGA-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[0:1] +; TONGA-NEXT: v_subrev_u32_e32 v5, vcc, s14, v6 +; 
TONGA-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[2:3] +; TONGA-NEXT: v_subrev_u32_e32 v7, vcc, s15, v8 +; TONGA-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[4:5] +; TONGA-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1] +; TONGA-NEXT: v_add_u32_e32 v9, vcc, 1, v0 +; TONGA-NEXT: v_cndmask_b32_e64 v5, v6, v5, s[2:3] +; TONGA-NEXT: v_add_u32_e32 v6, vcc, 1, v1 +; TONGA-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[4:5] +; TONGA-NEXT: v_add_u32_e32 v8, vcc, 1, v2 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s13, v4 +; TONGA-NEXT: v_mul_lo_u32 v4, v3, s17 +; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s14, v5 +; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s15, v7 +; TONGA-NEXT: s_ashr_i32 s12, s12, 31 +; TONGA-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc +; TONGA-NEXT: v_xor_b32_e32 v0, s12, v0 +; TONGA-NEXT: v_xor_b32_e32 v1, s18, v1 +; TONGA-NEXT: v_xor_b32_e32 v2, s19, v2 +; TONGA-NEXT: v_sub_u32_e32 v4, vcc, s16, v4 +; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, s12, v0 +; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s18, v1 +; TONGA-NEXT: v_subrev_u32_e32 v2, vcc, s19, v2 +; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v3 +; TONGA-NEXT: v_subrev_u32_e32 v6, vcc, s17, v4 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s17, v4 +; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc +; TONGA-NEXT: v_add_u32_e32 v5, vcc, 1, v3 +; TONGA-NEXT: s_xor_b32 s0, s7, s6 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s17, v4 +; TONGA-NEXT: s_ashr_i32 s0, s0, 31 +; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; TONGA-NEXT: v_xor_b32_e32 v3, s0, v3 +; TONGA-NEXT: v_subrev_u32_e32 v3, vcc, s0, v3 +; TONGA-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 ; TONGA-NEXT: s_endpgm ; ; GFX9-LABEL: sdiv_v4i32: @@ -2002,7 +2014,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i ; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2 ; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc ; GCN-NEXT: v_xor_b32_e32 v1, v1, v0 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, v1, v0 +; GCN-NEXT: v_subrev_i32_e32 v0, vcc, v0, v1 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 25 ; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GCN-NEXT: s_endpgm @@ -2049,7 +2061,7 @@ define amdgpu_kernel void @v_sdiv_i25(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2 ; TONGA-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc ; TONGA-NEXT: v_xor_b32_e32 v1, v1, v0 -; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0 +; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1 ; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 25 ; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; TONGA-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/sext-in-reg-vector-shuffle.ll b/llvm/test/CodeGen/AMDGPU/sext-in-reg-vector-shuffle.ll index 49dec15..584d26e 100644 --- a/llvm/test/CodeGen/AMDGPU/sext-in-reg-vector-shuffle.ll +++ b/llvm/test/CodeGen/AMDGPU/sext-in-reg-vector-shuffle.ll @@ -42,34 +42,35 @@ define amdgpu_kernel void @v_sext_in_reg_i8_i16_shuffle_vector(ptr addrspace(1) ; GFX11-FAKE16-LABEL: v_sext_in_reg_i8_i16_shuffle_vector: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v9, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v10, 0 ; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-FAKE16-NEXT: global_load_b64 v[0:1], v0, s[2:3] ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 ; 
GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v0 -; GFX11-FAKE16-NEXT: v_ashrrev_i32_e32 v2, 24, v1 +; GFX11-FAKE16-NEXT: v_ashrrev_i32_e32 v3, 24, v1 ; GFX11-FAKE16-NEXT: v_ashrrev_i32_e32 v5, 24, v0 ; GFX11-FAKE16-NEXT: v_ashrrev_i16 v6, 8, v1 ; GFX11-FAKE16-NEXT: v_bfe_i32 v7, v0, 0, 8 ; GFX11-FAKE16-NEXT: v_ashrrev_i16 v0, 8, v0 ; GFX11-FAKE16-NEXT: v_bfe_i32 v1, v1, 0, 8 -; GFX11-FAKE16-NEXT: v_bfe_i32 v3, v3, 0, 8 +; GFX11-FAKE16-NEXT: v_bfe_i32 v2, v2, 0, 8 ; GFX11-FAKE16-NEXT: v_bfe_i32 v4, v4, 0, 8 ; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v7, v7 ; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v0, v0 ; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v1, v1 ; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v6, v6 ; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v5, v5 -; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v8, v2 -; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v2, v4 -; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v4, v3 +; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v8, v3 +; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v4, v4 +; GFX11-FAKE16-NEXT: v_cvt_f16_i16_e32 v9, v2 ; GFX11-FAKE16-NEXT: v_pack_b32_f16 v3, v0, v7 ; GFX11-FAKE16-NEXT: v_pack_b32_f16 v1, v6, v1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-FAKE16-NEXT: v_pack_b32_f16 v2, v5, v2 -; GFX11-FAKE16-NEXT: v_pack_b32_f16 v0, v8, v4 -; GFX11-FAKE16-NEXT: global_store_b128 v9, v[0:3], s[0:1] +; GFX11-FAKE16-NEXT: v_pack_b32_f16 v2, v5, v4 +; GFX11-FAKE16-NEXT: v_pack_b32_f16 v0, v8, v9 +; GFX11-FAKE16-NEXT: global_store_b128 v10, v[0:3], s[0:1] ; GFX11-FAKE16-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %in.gep = getelementptr <{ [0 x i8] }>, ptr addrspace(1) %ptr, i64 0, i32 0, i32 %tid diff --git a/llvm/test/CodeGen/AMDGPU/shl.ll b/llvm/test/CodeGen/AMDGPU/shl.ll index 7aa7342..28330bf 100644 --- a/llvm/test/CodeGen/AMDGPU/shl.ll +++ b/llvm/test/CodeGen/AMDGPU/shl.ll @@ -681,30 +681,63 @@ define amdgpu_kernel void @shl_v4i16(ptr addrspace(1) %out, ptr addrspace(1) %in ; ; EG-LABEL: shl_v4i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 3, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 10, @11, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T8.X, 1 +; EG-NEXT: ALU 42, @12, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T10.XY, T0.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_128 T8.XYZW, T0.X, 0, #1 +; EG-NEXT: VTX_READ_128 T10.XYZW, T0.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: LSHL * T0.W, T0.X, literal.x, +; EG-NEXT: MOV T0.Y, T6.X, +; EG-NEXT: LSHL * T0.W, T0.X, literal.x, BS:VEC_120/SCL_212 ; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00) ; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W, -; EG-NEXT: ALU clause starting at 11: -; EG-NEXT: LSHR T1.W, T8.Z, literal.x, -; EG-NEXT: LSHR * T2.W, T8.X, literal.x, +; EG-NEXT: ALU clause starting at 12: +; EG-NEXT: AND_INT * T1.W, T10.Z, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T1.W, T10.X, PV.W, +; EG-NEXT: AND_INT T1.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T2.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T1.W, PS, PV.W, +; EG-NEXT: MOV * T6.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: LSHR T1.W, T10.Z, literal.x, +; EG-NEXT: LSHR * T2.W, T10.X, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: LSHL T0.Y, PS, PV.W, -; EG-NEXT: AND_INT T1.W, T8.Z, literal.x, -; EG-NEXT: AND_INT * T2.W, T8.X, literal.x, +; EG-NEXT: LSHL T1.W, PS, PV.W, +; 
EG-NEXT: AND_INT * T2.W, PV.X, literal.x, ; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) -; EG-NEXT: LSHL T0.X, PS, PV.W, +; EG-NEXT: LSHL * T1.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, T2.W, PV.W, +; EG-NEXT: MOV T6.X, PV.W, +; EG-NEXT: MOV * T0.X, T7.X, +; EG-NEXT: AND_INT * T1.W, T10.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL T1.W, T10.Y, PV.W, +; EG-NEXT: AND_INT * T2.W, T0.X, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: AND_INT * T1.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T1.W, T2.W, PV.W, +; EG-NEXT: MOV * T7.X, PV.W, +; EG-NEXT: MOV T0.X, PV.X, +; EG-NEXT: LSHR T1.W, T10.W, literal.x, +; EG-NEXT: LSHR * T2.W, T10.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: LSHL * T1.W, PS, PV.W, +; EG-NEXT: AND_INT T0.Z, T0.X, literal.x, +; EG-NEXT: LSHL T1.W, PV.W, literal.y, ; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, T0.W, -; EG-NEXT: LSHR * T8.X, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 16(2.242078e-44) +; EG-NEXT: LSHR T0.X, PS, literal.x, +; EG-NEXT: OR_INT * T10.Y, PV.Z, PV.W, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T7.X, PV.Y, +; EG-NEXT: MOV * T10.X, T6.X, %tid = call i32 @llvm.amdgcn.workitem.id.x() #0 %gep = getelementptr inbounds <4 x i16>, ptr addrspace(1) %in, i32 %tid %gep.out = getelementptr inbounds <4 x i16>, ptr addrspace(1) %out, i32 %tid diff --git a/llvm/test/CodeGen/AMDGPU/si-fold-reg-sequence.mir b/llvm/test/CodeGen/AMDGPU/si-fold-reg-sequence.mir index 7852f5d..23b24a2 100644 --- a/llvm/test/CodeGen/AMDGPU/si-fold-reg-sequence.mir +++ b/llvm/test/CodeGen/AMDGPU/si-fold-reg-sequence.mir @@ -1,11 +1,23 @@ -# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=si-fold-operands -verify-machineinstrs -o - %s +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s +# Check that we don't hang on this. --- name: fold_reg_sequence body: | bb.0: liveins: $vgpr0_vgpr1, $vgpr2 + ; CHECK-LABEL: name: fold_reg_sequence + ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 429 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1 + ; CHECK-NEXT: [[V_MUL_HI_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_HI_U32_e64 $vgpr2, [[REG_SEQUENCE]].sub0, implicit $exec + ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s32), addrspace 1) + ; CHECK-NEXT: [[V_MUL_HI_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_HI_U32_e64 [[GLOBAL_LOAD_DWORD]], [[REG_SEQUENCE]].sub0, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0 %0:sreg_32 = S_MOV_B32 0 %1:sreg_32 = S_MOV_B32 429 %2:sreg_64 = REG_SEQUENCE killed %1, %subreg.sub0, %0, %subreg.sub1 @@ -13,6 +25,20 @@ body: | %4:vgpr_32 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s32), addrspace 1) %5:vgpr_32 = V_MUL_HI_U32_e64 %4, %2.sub0, implicit $exec S_ENDPGM 0 - ... +# Fold through a COPY of REG_SEQUENCE. 
+--- +name: fold_through_copy +body: | + bb.0: + ; CHECK-LABEL: name: fold_through_copy + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF + ; CHECK-NEXT: [[V_PK_ADD_F32_:%[0-9]+]]:vreg_64_align2 = nofpexcept V_PK_ADD_F32 8, [[DEF]], 8, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec + %0:sreg_32 = S_MOV_B32 0 + %1:sreg_64 = REG_SEQUENCE %0:sreg_32, %subreg.sub0, %0:sreg_32, %subreg.sub1 + %2:sreg_64_xexec = IMPLICIT_DEF + %3:vreg_64_align2 = COPY %1:sreg_64 + %4:vreg_64_align2 = nofpexcept V_PK_ADD_F32 8, %2:sreg_64_xexec, 8, %3:vreg_64_align2, 0, 0, 0, 0, 0, implicit $mode, implicit $exec +... diff --git a/llvm/test/CodeGen/AMDGPU/sra.ll b/llvm/test/CodeGen/AMDGPU/sra.ll index 5d169c1..80c0d0f 100644 --- a/llvm/test/CodeGen/AMDGPU/sra.ll +++ b/llvm/test/CodeGen/AMDGPU/sra.ll @@ -320,28 +320,67 @@ define amdgpu_kernel void @ashr_v4i16(ptr addrspace(1) %out, ptr addrspace(1) %i ; ; EG-LABEL: ashr_v4i16: ; EG: ; %bb.0: -; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[] +; EG-NEXT: ALU 1, @8, KC0[CB0:0-32], KC1[] ; EG-NEXT: TEX 0 @6 -; EG-NEXT: ALU 10, @9, KC0[CB0:0-32], KC1[] -; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T7.XY, T8.X, 1 +; EG-NEXT: ALU 48, @10, KC0[CB0:0-32], KC1[] +; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T10.XY, T9.X, 1 ; EG-NEXT: CF_END ; EG-NEXT: PAD ; EG-NEXT: Fetch clause starting at 6: -; EG-NEXT: VTX_READ_128 T7.XYZW, T7.X, 0, #1 +; EG-NEXT: VTX_READ_128 T9.XYZW, T9.X, 0, #1 ; EG-NEXT: ALU clause starting at 8: -; EG-NEXT: MOV * T7.X, KC0[2].Z, -; EG-NEXT: ALU clause starting at 9: -; EG-NEXT: LSHR T0.Z, T7.X, literal.x, -; EG-NEXT: BFE_INT T0.W, T7.X, 0.0, literal.x, -; EG-NEXT: AND_INT * T1.W, T7.Z, literal.y, +; EG-NEXT: MOV * T0.Y, T6.X, +; EG-NEXT: MOV * T9.X, KC0[2].Z, +; EG-NEXT: ALU clause starting at 10: +; EG-NEXT: BFE_INT T0.W, T9.X, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.Z, literal.y, ; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) -; EG-NEXT: ASHR T7.X, PV.W, PS, -; EG-NEXT: BFE_INT T0.W, PV.Z, 0.0, literal.x, -; EG-NEXT: LSHR * T1.W, T7.Z, literal.x, +; EG-NEXT: ASHR * T0.W, PV.W, PS, +; EG-NEXT: AND_INT T0.W, PV.W, literal.x, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.y, +; EG-NEXT: 65535(9.183409e-41), -65536(nan) +; EG-NEXT: OR_INT * T0.W, PS, PV.W, +; EG-NEXT: MOV * T6.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T9.X, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: LSHR * T1.W, T9.Z, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: ASHR T0.W, PV.W, PS, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV T6.X, PV.W, +; EG-NEXT: MOV T0.Y, T7.X, +; EG-NEXT: BFE_INT T0.W, T9.Y, 0.0, literal.x, +; EG-NEXT: AND_INT * T1.W, T9.W, literal.y, +; EG-NEXT: 16(2.242078e-44), 65535(9.183409e-41) +; EG-NEXT: ASHR T0.W, PV.W, PS, +; EG-NEXT: AND_INT * T1.W, PV.Y, literal.x, +; EG-NEXT: -65536(nan), 0(0.000000e+00) +; EG-NEXT: AND_INT * T0.W, PV.W, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: OR_INT * T0.W, T1.W, PV.W, +; EG-NEXT: MOV * T7.X, PV.W, +; EG-NEXT: MOV T0.Y, PV.X, +; EG-NEXT: LSHR * T0.W, T9.Y, literal.x, +; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) +; EG-NEXT: BFE_INT T0.W, PV.W, 0.0, literal.x, +; EG-NEXT: LSHR * T1.W, T9.W, literal.x, +; EG-NEXT: 16(2.242078e-44), 
0(0.000000e+00) +; EG-NEXT: ASHR T0.W, PV.W, PS, +; EG-NEXT: AND_INT * T1.W, T0.Y, literal.x, +; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00) +; EG-NEXT: LSHL * T0.W, PV.W, literal.x, ; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00) -; EG-NEXT: LSHR T8.X, KC0[2].Y, literal.x, -; EG-NEXT: ASHR * T7.Y, PV.W, PS, +; EG-NEXT: LSHR T9.X, KC0[2].Y, literal.x, +; EG-NEXT: OR_INT * T10.Y, T1.W, PV.W, ; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00) +; EG-NEXT: MOV T7.X, PV.Y, +; EG-NEXT: MOV * T10.X, T6.X, %b_ptr = getelementptr <4 x i16>, ptr addrspace(1) %in, i16 1 %a = load <4 x i16>, ptr addrspace(1) %in %b = load <4 x i16>, ptr addrspace(1) %b_ptr diff --git a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll index 141b86a..4a6202ea 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll @@ -493,9 +493,9 @@ define <3 x half> @v_constained_fsub_v3f16_fpexcept_strict(<3 x half> %x, <3 x h ; GFX11-GISEL-TRUE16-LABEL: v_constained_fsub_v3f16_fpexcept_strict: ; GFX11-GISEL-TRUE16: ; %bb.0: ; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-GISEL-TRUE16-NEXT: v_sub_f16_e32 v1.l, v1.l, v3.l ; GFX11-GISEL-TRUE16-NEXT: v_sub_f16_e32 v0.l, v0.l, v2.l ; GFX11-GISEL-TRUE16-NEXT: v_sub_f16_e32 v0.h, v0.h, v2.h +; GFX11-GISEL-TRUE16-NEXT: v_sub_f16_e32 v1.l, v1.l, v3.l ; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-GISEL-FAKE16-LABEL: v_constained_fsub_v3f16_fpexcept_strict: diff --git a/llvm/test/CodeGen/AMDGPU/strict_ldexp.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_ldexp.f16.ll index 84fe4ec..98d0a62 100644 --- a/llvm/test/CodeGen/AMDGPU/strict_ldexp.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/strict_ldexp.f16.ll @@ -299,13 +299,13 @@ define <3 x half> @test_ldexp_v3f16_v3i32(ptr addrspace(1) %out, <3 x half> %a, ; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v0, 0x7fff ; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v1, 0xffff8000, v6, v0 -; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v4, 0xffff8000, v4, v0 -; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v5, 0xffff8000, v5, v0 -; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v1.l, v3.l, v1.l +; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v1, 0xffff8000, v4, v0 +; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v4, 0xffff8000, v5, v0 +; GFX11-GISEL-TRUE16-NEXT: v_med3_i32 v5, 0xffff8000, v6, v0 +; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v0.l, v2.l, v1.l ; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v0.l, v2.l, v4.l -; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v0.h, v2.h, v5.l +; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v0.h, v2.h, v4.l +; GFX11-GISEL-TRUE16-NEXT: v_ldexp_f16_e32 v1.l, v3.l, v5.l ; GFX11-GISEL-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-GISEL-FAKE16-LABEL: test_ldexp_v3f16_v3i32: diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll index ed2f06b..b5d9d00 100644 --- a/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll +++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll @@ -3963,8 +3963,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) { ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc ; GFX7-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5] -; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX7-SDAG-NEXT: 
v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX7-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3] ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc @@ -4067,8 +4067,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) { ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc ; GFX8-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5] -; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX8-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3] ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc @@ -4175,8 +4175,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) { ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc ; GFX9-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5] ; GFX9-SDAG-NEXT: s_nop 1 -; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX9-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3] ; GFX9-SDAG-NEXT: s_nop 1 ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc @@ -4283,8 +4283,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) { ; GFX10-SDAG-NEXT: v_cmp_gt_i64_e64 s4, v[2:3], v[6:7] ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4 ; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4 +; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4 ; GFX10-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3] ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo @@ -4387,8 +4387,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) { ; GFX11-SDAG-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[6:7] ; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0 ; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0 +; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX11-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3] ; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1 ; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] @@ -4502,8 +4502,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) { ; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0 ; GFX12-SDAG-NEXT: s_wait_alu 0xf1ff ; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0 +; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX12-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3] ; GFX12-SDAG-NEXT: s_wait_alu 0xfffd ; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1 diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll index 8812cae..2a989ec 100644 --- a/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll +++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll @@ -3963,8 +3963,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) { ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; 
GFX7-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc ; GFX7-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5] -; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX7-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3] ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc @@ -4067,8 +4067,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) { ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc ; GFX8-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5] -; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX8-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3] ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc @@ -4175,8 +4175,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) { ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc ; GFX9-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5] ; GFX9-SDAG-NEXT: s_nop 1 -; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX9-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3] ; GFX9-SDAG-NEXT: s_nop 1 ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc @@ -4283,8 +4283,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) { ; GFX10-SDAG-NEXT: v_cmp_lt_i64_e64 s4, v[2:3], v[6:7] ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4 ; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4 +; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4 ; GFX10-SDAG-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3] ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo @@ -4387,8 +4387,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) { ; GFX11-SDAG-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[6:7] ; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0 ; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0 +; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX11-SDAG-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3] ; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1 ; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] @@ -4502,8 +4502,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) { ; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0 ; GFX12-SDAG-NEXT: s_wait_alu 0xf1ff ; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0 +; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX12-SDAG-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3] ; GFX12-SDAG-NEXT: s_wait_alu 0xfffd ; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1 diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll index 82eb122..69fd58a 100644 --- a/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll +++ 
b/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll @@ -3839,8 +3839,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) { ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc ; GFX7-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5] -; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX7-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3] ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc @@ -3943,8 +3943,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) { ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc ; GFX8-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5] -; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX8-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3] ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc @@ -4051,8 +4051,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) { ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc ; GFX9-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5] ; GFX9-SDAG-NEXT: s_nop 1 -; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX9-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3] ; GFX9-SDAG-NEXT: s_nop 1 ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc @@ -4159,8 +4159,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) { ; GFX10-SDAG-NEXT: v_cmp_gt_u64_e64 s4, v[2:3], v[6:7] ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4 ; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4 +; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4 ; GFX10-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3] ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo @@ -4263,8 +4263,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) { ; GFX11-SDAG-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[6:7] ; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0 ; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0 +; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX11-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3] ; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1 ; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] @@ -4378,8 +4378,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) { ; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0 ; GFX12-SDAG-NEXT: s_wait_alu 0xf1ff ; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0 +; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX12-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3] ; GFX12-SDAG-NEXT: s_wait_alu 0xfffd ; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1 diff --git 
a/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll index 115b05a..aab0e76 100644 --- a/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll +++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll @@ -3585,8 +3585,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) { ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc ; GFX7-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5] -; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX7-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3] ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc ; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc @@ -3689,8 +3689,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) { ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc ; GFX8-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5] -; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX8-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3] ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc ; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc @@ -3797,8 +3797,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) { ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc ; GFX9-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5] ; GFX9-SDAG-NEXT: s_nop 1 -; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc ; GFX9-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3] ; GFX9-SDAG-NEXT: s_nop 1 ; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc @@ -3905,8 +3905,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) { ; GFX10-SDAG-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[6:7] ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4 ; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4 +; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4 ; GFX10-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo ; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo @@ -4009,8 +4009,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) { ; GFX11-SDAG-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[6:7] ; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0 ; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0 +; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX11-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] ; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1 ; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31] @@ -4124,8 +4124,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) { ; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0 ; GFX12-SDAG-NEXT: s_wait_alu 0xf1ff ; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0 +; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0 ; GFX12-SDAG-NEXT: 
v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] ; GFX12-SDAG-NEXT: s_wait_alu 0xfffd ; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1 diff --git a/llvm/test/CodeGen/AMDGPU/wmma-hazards-gfx1250-w32.mir b/llvm/test/CodeGen/AMDGPU/wmma-hazards-gfx1250-w32.mir index 2032b98..fa3b9244 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-hazards-gfx1250-w32.mir +++ b/llvm/test/CodeGen/AMDGPU/wmma-hazards-gfx1250-w32.mir @@ -834,6 +834,222 @@ body: | ... --- +name: test_wmma_scale_F32_16x16x128_F8F6F4_F8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale_F32_16x16x128_F8F6F4_F8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale_F32_16x16x128_F8F6F4_F8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale_F32_16x16x128_F8F6F4_F8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale_F32_16x16x128_F8F6F4_F8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale_F32_16x16x128_F8F6F4_F8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr81, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr81, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale_F32_16x16x128_F8F6F4_F6F4_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr81, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr81, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale16_F3216_16x16x128_F8F6F4_F8_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale16_F3216_16x16x128_F8F6F4_F8_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE16_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41, killed $vgpr42_vgpr43, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE16_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41, killed $vgpr42_vgpr43, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale16_F32_16x16x128_F8F6F4_F8_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale16_F32_16x16x128_F8F6F4_F8_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE16_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41, killed $vgpr42_vgpr43, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE16_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41, killed $vgpr42_vgpr43, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale16_F32_16x16x128_F8F6F4_F8_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale16_F32_16x16x128_F8F6F4_F8_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 1, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale16_F3216_16x16x128_F8F6F4_F6f4_D0_overlaps_A1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale16_F3216_16x16x128_F8F6F4_F6f4_D0_overlaps_A1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE16_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41, killed $vgpr42_vgpr43, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE16_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41, killed $vgpr42_vgpr43, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale16_F32_16x16x128_F8F6F4_F6f4_D0_overlaps_B1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale16_F32_16x16x128_F8F6F4_F6f4_D0_overlaps_B1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE16_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41, killed $vgpr42_vgpr43, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE16_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40_vgpr41, killed $vgpr42_vgpr43, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71 = V_WMMA_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47, 8, killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, 1, 2, 0, 0, implicit $exec +... 
+ +--- +name: test_wmma_scale16_F32_16x16x128_F8F6F4_F6f4_D0_overlaps_Index1 +body: | + bb.0: + ; GFX1250-LABEL: name: test_wmma_scale16_F32_16x16x128_F8F6F4_F6f4_D0_overlaps_Index1 + ; GFX1250: early-clobber $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: V_NOP_e32 implicit $exec + ; GFX1250-NEXT: early-clobber $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec + $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 = V_WMMA_SCALE_F32_16X16X128_F8F6F4_f8_f8_w32_twoaddr killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, killed $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 8, killed $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39, killed $vgpr40, killed $vgpr41, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0, implicit $exec + $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 = V_SWMMAC_F32_16X16X128_FP8_FP8_w32_twoaddr killed $vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71, killed $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55, killed $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63, killed $vgpr32_vgpr33, 0, 0, 0, implicit $exec +... 
+ +--- name: test_swmmac_f32_16x16x64_bf16_D0_overlaps_A1 body: | bb.0: diff --git a/llvm/test/CodeGen/ARM/bad-constraint.ll b/llvm/test/CodeGen/ARM/bad-constraint.ll index 9b8fcd5..7d80f0c 100644 --- a/llvm/test/CodeGen/ARM/bad-constraint.ll +++ b/llvm/test/CodeGen/ARM/bad-constraint.ll @@ -1,6 +1,7 @@ ; RUN: not llc -filetype=obj %s -o /dev/null 2>&1 | FileCheck %s ; CHECK: error: couldn't allocate input reg for constraint '{d2}' ; CHECK-NEXT: error: couldn't allocate input reg for constraint '{s2}' +; CHECK-NEXT: error: couldn't allocate input reg for constraint '{d3}' target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" target triple = "armv8a-unknown-linux-gnueabihf" @@ -23,3 +24,8 @@ entry: ret void } +define void @_Z1dv() local_unnamed_addr { +entry: + tail call void asm sideeffect "", "{d3}"(<16 x i8> splat (i8 -1)) + ret void +} diff --git a/llvm/test/CodeGen/ARM/div.ll b/llvm/test/CodeGen/ARM/div.ll index 935aeaa..97cd9fd 100644 --- a/llvm/test/CodeGen/ARM/div.ll +++ b/llvm/test/CodeGen/ARM/div.ll @@ -145,3 +145,57 @@ define i64 @f8(i64 %a) { %tmp1 = udiv i64 %a, 3 ret i64 %tmp1 } + +define i8 @sdiv_i8(i8 %a, i8 %b) { +entry: +; CHECK-LABEL: sdiv_i8 +; CHECK-SWDIV: __divsi3 + +; CHECK-THUMB: .thumb_func +; CHECK-HWDIV: sdiv + +; CHECK-EABI: __aeabi_idiv + %tmp1 = sdiv i8 %a, %b ; <i8> [#uses=1] + ret i8 %tmp1 +} + + +define i16 @sdiv_i16(i16 %a, i16 %b) { +entry: +; CHECK-LABEL: sdiv_i16 +; CHECK-SWDIV: __divsi3 + +; CHECK-THUMB: .thumb_func +; CHECK-HWDIV: sdiv + +; CHECK-EABI: __aeabi_idiv + %tmp1 = sdiv i16 %a, %b ; <i16> [#uses=1] + ret i16 %tmp1 +} + +define i8 @udiv_i8(i8 %a, i8 %b) { +entry: +; CHECK-LABEL: udiv_i8 +; CHECK-SWDIV: __udivsi3 + +; CHECK-THUMB: .thumb_func +; CHECK-HWDIV: udiv + +; CHECK-EABI: __aeabi_uidiv + %tmp1 = udiv i8 %a, %b ; <i8> [#uses=1] + ret i8 %tmp1 +} + + +define i16 @udiv_i16(i16 %a, i16 %b) { +entry: +; CHECK-LABEL: udiv_i16 +; CHECK-SWDIV: __udivsi3 + +; CHECK-THUMB: .thumb_func +; CHECK-HWDIV: udiv + +; CHECK-EABI: __aeabi_uidiv + %tmp1 = udiv i16 %a, %b ; <i16> [#uses=1] + ret i16 %tmp1 +} diff --git a/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll new file mode 100644 index 0000000..0c01bb9 --- /dev/null +++ b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll @@ -0,0 +1,14 @@ +; RUN: llc %s -filetype=asm -o - | FileCheck %s + +; CHECK: vmov.i8 d3, #0xff + +target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" +target triple = "armv8a-unknown-linux-gnueabihf" + +; Function Attrs: mustprogress noimplicitfloat nounwind +define void @cvt_vec() local_unnamed_addr { +entry: + tail call void asm sideeffect "", "{d3}"(<8 x i8> splat (i8 -1)) + ret void +} + diff --git a/llvm/test/CodeGen/AVR/llvm.sincos.ll b/llvm/test/CodeGen/AVR/llvm.sincos.ll index 897101d..b70b8d3 100644 --- a/llvm/test/CodeGen/AVR/llvm.sincos.ll +++ b/llvm/test/CodeGen/AVR/llvm.sincos.ll @@ -3,630 +3,266 @@ ; RUN: llc -mtriple=avr-unknown-linux-gnu < %s | FileCheck -check-prefixes=CHECK,GNU %s define { half, half } @test_sincos_f16(half %a) #0 { -; NONGNU-LABEL: test_sincos_f16: -; NONGNU: ; %bb.0: -; NONGNU-NEXT: push r12 -; NONGNU-NEXT: push r13 -; NONGNU-NEXT: push r14 -; NONGNU-NEXT: push r15 -; NONGNU-NEXT: push r16 -; NONGNU-NEXT: push r17 -; NONGNU-NEXT: mov r24, r22 -; NONGNU-NEXT: mov r25, r23 -; NONGNU-NEXT: rcall __extendhfsf2 -; NONGNU-NEXT: mov r16, r22 -; NONGNU-NEXT: mov r17, r23 -; NONGNU-NEXT: mov r14, r24 -; NONGNU-NEXT: mov r15, r25 -; NONGNU-NEXT: rcall sin -; 
NONGNU-NEXT: rcall __truncsfhf2 -; NONGNU-NEXT: mov r12, r24 -; NONGNU-NEXT: mov r13, r25 -; NONGNU-NEXT: mov r22, r16 -; NONGNU-NEXT: mov r23, r17 -; NONGNU-NEXT: mov r24, r14 -; NONGNU-NEXT: mov r25, r15 -; NONGNU-NEXT: rcall cos -; NONGNU-NEXT: rcall __truncsfhf2 -; NONGNU-NEXT: mov r22, r24 -; NONGNU-NEXT: mov r23, r25 -; NONGNU-NEXT: mov r18, r12 -; NONGNU-NEXT: mov r19, r13 -; NONGNU-NEXT: pop r17 -; NONGNU-NEXT: pop r16 -; NONGNU-NEXT: pop r15 -; NONGNU-NEXT: pop r14 -; NONGNU-NEXT: pop r13 -; NONGNU-NEXT: pop r12 -; NONGNU-NEXT: ret -; -; GNU-LABEL: test_sincos_f16: -; GNU: ; %bb.0: -; GNU-NEXT: push r16 -; GNU-NEXT: push r17 -; GNU-NEXT: push r28 -; GNU-NEXT: push r29 -; GNU-NEXT: in r28, 61 -; GNU-NEXT: in r29, 62 -; GNU-NEXT: sbiw r28, 8 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: mov r24, r22 -; GNU-NEXT: mov r25, r23 -; GNU-NEXT: rcall __extendhfsf2 -; GNU-NEXT: mov r20, r28 -; GNU-NEXT: mov r21, r29 -; GNU-NEXT: subi r20, 251 -; GNU-NEXT: sbci r21, 255 -; GNU-NEXT: mov r18, r28 -; GNU-NEXT: mov r19, r29 -; GNU-NEXT: subi r18, 255 -; GNU-NEXT: sbci r19, 255 -; GNU-NEXT: rcall sincosf -; GNU-NEXT: ldd r22, Y+5 -; GNU-NEXT: ldd r23, Y+6 -; GNU-NEXT: ldd r24, Y+7 -; GNU-NEXT: ldd r25, Y+8 -; GNU-NEXT: rcall __truncsfhf2 -; GNU-NEXT: mov r16, r24 -; GNU-NEXT: mov r17, r25 -; GNU-NEXT: ldd r22, Y+1 -; GNU-NEXT: ldd r23, Y+2 -; GNU-NEXT: ldd r24, Y+3 -; GNU-NEXT: ldd r25, Y+4 -; GNU-NEXT: rcall __truncsfhf2 -; GNU-NEXT: mov r22, r24 -; GNU-NEXT: mov r23, r25 -; GNU-NEXT: mov r18, r16 -; GNU-NEXT: mov r19, r17 -; GNU-NEXT: adiw r28, 8 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: pop r29 -; GNU-NEXT: pop r28 -; GNU-NEXT: pop r17 -; GNU-NEXT: pop r16 -; GNU-NEXT: ret +; CHECK-LABEL: test_sincos_f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r12 +; CHECK-NEXT: push r13 +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: mov r24, r22 +; CHECK-NEXT: mov r25, r23 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: mov r16, r22 +; CHECK-NEXT: mov r17, r23 +; CHECK-NEXT: mov r14, r24 +; CHECK-NEXT: mov r15, r25 +; CHECK-NEXT: rcall sin +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r12, r24 +; CHECK-NEXT: mov r13, r25 +; CHECK-NEXT: mov r22, r16 +; CHECK-NEXT: mov r23, r17 +; CHECK-NEXT: mov r24, r14 +; CHECK-NEXT: mov r25, r15 +; CHECK-NEXT: rcall cos +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r22, r24 +; CHECK-NEXT: mov r23, r25 +; CHECK-NEXT: mov r18, r12 +; CHECK-NEXT: mov r19, r13 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: pop r13 +; CHECK-NEXT: pop r12 +; CHECK-NEXT: ret %result = call { half, half } @llvm.sincos.f16(half %a) ret { half, half } %result } define half @test_sincos_f16_only_use_sin(half %a) #0 { -; NONGNU-LABEL: test_sincos_f16_only_use_sin: -; NONGNU: ; %bb.0: -; NONGNU-NEXT: mov r24, r22 -; NONGNU-NEXT: mov r25, r23 -; NONGNU-NEXT: rcall __extendhfsf2 -; NONGNU-NEXT: rcall sin -; NONGNU-NEXT: rcall __truncsfhf2 -; NONGNU-NEXT: mov r22, r24 -; NONGNU-NEXT: mov r23, r25 -; NONGNU-NEXT: ret -; -; GNU-LABEL: test_sincos_f16_only_use_sin: -; GNU: ; %bb.0: -; GNU-NEXT: push r28 -; GNU-NEXT: push r29 -; GNU-NEXT: in r28, 61 -; GNU-NEXT: in r29, 62 -; GNU-NEXT: sbiw r28, 8 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 
-; GNU-NEXT: mov r24, r22 -; GNU-NEXT: mov r25, r23 -; GNU-NEXT: rcall __extendhfsf2 -; GNU-NEXT: mov r20, r28 -; GNU-NEXT: mov r21, r29 -; GNU-NEXT: subi r20, 251 -; GNU-NEXT: sbci r21, 255 -; GNU-NEXT: mov r18, r28 -; GNU-NEXT: mov r19, r29 -; GNU-NEXT: subi r18, 255 -; GNU-NEXT: sbci r19, 255 -; GNU-NEXT: rcall sincosf -; GNU-NEXT: ldd r22, Y+5 -; GNU-NEXT: ldd r23, Y+6 -; GNU-NEXT: ldd r24, Y+7 -; GNU-NEXT: ldd r25, Y+8 -; GNU-NEXT: rcall __truncsfhf2 -; GNU-NEXT: mov r22, r24 -; GNU-NEXT: mov r23, r25 -; GNU-NEXT: adiw r28, 8 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: pop r29 -; GNU-NEXT: pop r28 -; GNU-NEXT: ret +; CHECK-LABEL: test_sincos_f16_only_use_sin: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r24, r22 +; CHECK-NEXT: mov r25, r23 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: rcall sin +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r22, r24 +; CHECK-NEXT: mov r23, r25 +; CHECK-NEXT: ret %result = call { half, half } @llvm.sincos.f16(half %a) %result.0 = extractvalue { half, half } %result, 0 ret half %result.0 } define half @test_sincos_f16_only_use_cos(half %a) #0 { -; NONGNU-LABEL: test_sincos_f16_only_use_cos: -; NONGNU: ; %bb.0: -; NONGNU-NEXT: mov r24, r22 -; NONGNU-NEXT: mov r25, r23 -; NONGNU-NEXT: rcall __extendhfsf2 -; NONGNU-NEXT: rcall cos -; NONGNU-NEXT: rcall __truncsfhf2 -; NONGNU-NEXT: mov r22, r24 -; NONGNU-NEXT: mov r23, r25 -; NONGNU-NEXT: ret -; -; GNU-LABEL: test_sincos_f16_only_use_cos: -; GNU: ; %bb.0: -; GNU-NEXT: push r28 -; GNU-NEXT: push r29 -; GNU-NEXT: in r28, 61 -; GNU-NEXT: in r29, 62 -; GNU-NEXT: sbiw r28, 8 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: mov r24, r22 -; GNU-NEXT: mov r25, r23 -; GNU-NEXT: rcall __extendhfsf2 -; GNU-NEXT: mov r20, r28 -; GNU-NEXT: mov r21, r29 -; GNU-NEXT: subi r20, 251 -; GNU-NEXT: sbci r21, 255 -; GNU-NEXT: mov r18, r28 -; GNU-NEXT: mov r19, r29 -; GNU-NEXT: subi r18, 255 -; GNU-NEXT: sbci r19, 255 -; GNU-NEXT: rcall sincosf -; GNU-NEXT: ldd r22, Y+1 -; GNU-NEXT: ldd r23, Y+2 -; GNU-NEXT: ldd r24, Y+3 -; GNU-NEXT: ldd r25, Y+4 -; GNU-NEXT: rcall __truncsfhf2 -; GNU-NEXT: mov r22, r24 -; GNU-NEXT: mov r23, r25 -; GNU-NEXT: adiw r28, 8 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: pop r29 -; GNU-NEXT: pop r28 -; GNU-NEXT: ret +; CHECK-LABEL: test_sincos_f16_only_use_cos: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r24, r22 +; CHECK-NEXT: mov r25, r23 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: rcall cos +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r22, r24 +; CHECK-NEXT: mov r23, r25 +; CHECK-NEXT: ret %result = call { half, half } @llvm.sincos.f16(half %a) %result.1 = extractvalue { half, half } %result, 1 ret half %result.1 } define { <2 x half>, <2 x half> } @test_sincos_v2f16(<2 x half> %a) #0 { -; NONGNU-LABEL: test_sincos_v2f16: -; NONGNU: ; %bb.0: -; NONGNU-NEXT: push r6 -; NONGNU-NEXT: push r7 -; NONGNU-NEXT: push r8 -; NONGNU-NEXT: push r9 -; NONGNU-NEXT: push r10 -; NONGNU-NEXT: push r11 -; NONGNU-NEXT: push r12 -; NONGNU-NEXT: push r13 -; NONGNU-NEXT: push r14 -; NONGNU-NEXT: push r15 -; NONGNU-NEXT: push r16 -; NONGNU-NEXT: push r17 -; NONGNU-NEXT: mov r10, r22 -; NONGNU-NEXT: mov r11, r23 -; NONGNU-NEXT: rcall __extendhfsf2 -; NONGNU-NEXT: mov r16, r22 -; NONGNU-NEXT: mov r17, r23 -; NONGNU-NEXT: mov r14, r24 -; NONGNU-NEXT: mov r15, r25 -; 
NONGNU-NEXT: rcall sin -; NONGNU-NEXT: rcall __truncsfhf2 -; NONGNU-NEXT: mov r12, r24 -; NONGNU-NEXT: mov r13, r25 -; NONGNU-NEXT: mov r24, r10 -; NONGNU-NEXT: mov r25, r11 -; NONGNU-NEXT: rcall __extendhfsf2 -; NONGNU-NEXT: mov r10, r22 -; NONGNU-NEXT: mov r11, r23 -; NONGNU-NEXT: mov r8, r24 -; NONGNU-NEXT: mov r9, r25 -; NONGNU-NEXT: rcall cos -; NONGNU-NEXT: rcall __truncsfhf2 -; NONGNU-NEXT: mov r6, r24 -; NONGNU-NEXT: mov r7, r25 -; NONGNU-NEXT: mov r22, r10 -; NONGNU-NEXT: mov r23, r11 -; NONGNU-NEXT: mov r24, r8 -; NONGNU-NEXT: mov r25, r9 -; NONGNU-NEXT: rcall sin -; NONGNU-NEXT: rcall __truncsfhf2 -; NONGNU-NEXT: mov r10, r24 -; NONGNU-NEXT: mov r11, r25 -; NONGNU-NEXT: mov r22, r16 -; NONGNU-NEXT: mov r23, r17 -; NONGNU-NEXT: mov r24, r14 -; NONGNU-NEXT: mov r25, r15 -; NONGNU-NEXT: rcall cos -; NONGNU-NEXT: rcall __truncsfhf2 -; NONGNU-NEXT: mov r18, r10 -; NONGNU-NEXT: mov r19, r11 -; NONGNU-NEXT: mov r20, r12 -; NONGNU-NEXT: mov r21, r13 -; NONGNU-NEXT: mov r22, r6 -; NONGNU-NEXT: mov r23, r7 -; NONGNU-NEXT: pop r17 -; NONGNU-NEXT: pop r16 -; NONGNU-NEXT: pop r15 -; NONGNU-NEXT: pop r14 -; NONGNU-NEXT: pop r13 -; NONGNU-NEXT: pop r12 -; NONGNU-NEXT: pop r11 -; NONGNU-NEXT: pop r10 -; NONGNU-NEXT: pop r9 -; NONGNU-NEXT: pop r8 -; NONGNU-NEXT: pop r7 -; NONGNU-NEXT: pop r6 -; NONGNU-NEXT: ret -; -; GNU-LABEL: test_sincos_v2f16: -; GNU: ; %bb.0: -; GNU-NEXT: push r12 -; GNU-NEXT: push r13 -; GNU-NEXT: push r14 -; GNU-NEXT: push r15 -; GNU-NEXT: push r16 -; GNU-NEXT: push r17 -; GNU-NEXT: push r28 -; GNU-NEXT: push r29 -; GNU-NEXT: in r28, 61 -; GNU-NEXT: in r29, 62 -; GNU-NEXT: sbiw r28, 16 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: mov r16, r24 -; GNU-NEXT: mov r17, r25 -; GNU-NEXT: mov r24, r22 -; GNU-NEXT: mov r25, r23 -; GNU-NEXT: rcall __extendhfsf2 -; GNU-NEXT: mov r20, r28 -; GNU-NEXT: mov r21, r29 -; GNU-NEXT: subi r20, 243 -; GNU-NEXT: sbci r21, 255 -; GNU-NEXT: mov r18, r28 -; GNU-NEXT: mov r19, r29 -; GNU-NEXT: subi r18, 247 -; GNU-NEXT: sbci r19, 255 -; GNU-NEXT: rcall sincosf -; GNU-NEXT: mov r24, r16 -; GNU-NEXT: mov r25, r17 -; GNU-NEXT: rcall __extendhfsf2 -; GNU-NEXT: mov r20, r28 -; GNU-NEXT: mov r21, r29 -; GNU-NEXT: subi r20, 251 -; GNU-NEXT: sbci r21, 255 -; GNU-NEXT: mov r18, r28 -; GNU-NEXT: mov r19, r29 -; GNU-NEXT: subi r18, 255 -; GNU-NEXT: sbci r19, 255 -; GNU-NEXT: rcall sincosf -; GNU-NEXT: ldd r22, Y+13 -; GNU-NEXT: ldd r23, Y+14 -; GNU-NEXT: ldd r24, Y+15 -; GNU-NEXT: ldd r25, Y+16 -; GNU-NEXT: rcall __truncsfhf2 -; GNU-NEXT: mov r16, r24 -; GNU-NEXT: mov r17, r25 -; GNU-NEXT: ldd r22, Y+5 -; GNU-NEXT: ldd r23, Y+6 -; GNU-NEXT: ldd r24, Y+7 -; GNU-NEXT: ldd r25, Y+8 -; GNU-NEXT: rcall __truncsfhf2 -; GNU-NEXT: mov r14, r24 -; GNU-NEXT: mov r15, r25 -; GNU-NEXT: ldd r22, Y+9 -; GNU-NEXT: ldd r23, Y+10 -; GNU-NEXT: ldd r24, Y+11 -; GNU-NEXT: ldd r25, Y+12 -; GNU-NEXT: rcall __truncsfhf2 -; GNU-NEXT: mov r12, r24 -; GNU-NEXT: mov r13, r25 -; GNU-NEXT: ldd r22, Y+1 -; GNU-NEXT: ldd r23, Y+2 -; GNU-NEXT: ldd r24, Y+3 -; GNU-NEXT: ldd r25, Y+4 -; GNU-NEXT: rcall __truncsfhf2 -; GNU-NEXT: mov r18, r16 -; GNU-NEXT: mov r19, r17 -; GNU-NEXT: mov r20, r14 -; GNU-NEXT: mov r21, r15 -; GNU-NEXT: mov r22, r12 -; GNU-NEXT: mov r23, r13 -; GNU-NEXT: adiw r28, 16 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: pop r29 -; GNU-NEXT: pop r28 -; GNU-NEXT: pop r17 -; GNU-NEXT: pop r16 -; GNU-NEXT: pop r15 -; 
GNU-NEXT: pop r14 -; GNU-NEXT: pop r13 -; GNU-NEXT: pop r12 -; GNU-NEXT: ret +; CHECK-LABEL: test_sincos_v2f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r6 +; CHECK-NEXT: push r7 +; CHECK-NEXT: push r8 +; CHECK-NEXT: push r9 +; CHECK-NEXT: push r10 +; CHECK-NEXT: push r11 +; CHECK-NEXT: push r12 +; CHECK-NEXT: push r13 +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: mov r10, r22 +; CHECK-NEXT: mov r11, r23 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: mov r16, r22 +; CHECK-NEXT: mov r17, r23 +; CHECK-NEXT: mov r14, r24 +; CHECK-NEXT: mov r15, r25 +; CHECK-NEXT: rcall sin +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r12, r24 +; CHECK-NEXT: mov r13, r25 +; CHECK-NEXT: mov r24, r10 +; CHECK-NEXT: mov r25, r11 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: mov r10, r22 +; CHECK-NEXT: mov r11, r23 +; CHECK-NEXT: mov r8, r24 +; CHECK-NEXT: mov r9, r25 +; CHECK-NEXT: rcall cos +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r6, r24 +; CHECK-NEXT: mov r7, r25 +; CHECK-NEXT: mov r22, r10 +; CHECK-NEXT: mov r23, r11 +; CHECK-NEXT: mov r24, r8 +; CHECK-NEXT: mov r25, r9 +; CHECK-NEXT: rcall sin +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r10, r24 +; CHECK-NEXT: mov r11, r25 +; CHECK-NEXT: mov r22, r16 +; CHECK-NEXT: mov r23, r17 +; CHECK-NEXT: mov r24, r14 +; CHECK-NEXT: mov r25, r15 +; CHECK-NEXT: rcall cos +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r18, r10 +; CHECK-NEXT: mov r19, r11 +; CHECK-NEXT: mov r20, r12 +; CHECK-NEXT: mov r21, r13 +; CHECK-NEXT: mov r22, r6 +; CHECK-NEXT: mov r23, r7 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: pop r13 +; CHECK-NEXT: pop r12 +; CHECK-NEXT: pop r11 +; CHECK-NEXT: pop r10 +; CHECK-NEXT: pop r9 +; CHECK-NEXT: pop r8 +; CHECK-NEXT: pop r7 +; CHECK-NEXT: pop r6 +; CHECK-NEXT: ret %result = call { <2 x half>, <2 x half> } @llvm.sincos.v2f16(<2 x half> %a) ret { <2 x half>, <2 x half> } %result } define { float, float } @test_sincos_f32(float %a) #0 { -; NONGNU-LABEL: test_sincos_f32: -; NONGNU: ; %bb.0: -; NONGNU-NEXT: push r10 -; NONGNU-NEXT: push r11 -; NONGNU-NEXT: push r12 -; NONGNU-NEXT: push r13 -; NONGNU-NEXT: push r14 -; NONGNU-NEXT: push r15 -; NONGNU-NEXT: push r16 -; NONGNU-NEXT: push r17 -; NONGNU-NEXT: mov r16, r24 -; NONGNU-NEXT: mov r17, r25 -; NONGNU-NEXT: mov r14, r22 -; NONGNU-NEXT: mov r15, r23 -; NONGNU-NEXT: rcall sin -; NONGNU-NEXT: mov r12, r22 -; NONGNU-NEXT: mov r13, r23 -; NONGNU-NEXT: mov r10, r24 -; NONGNU-NEXT: mov r11, r25 -; NONGNU-NEXT: mov r22, r14 -; NONGNU-NEXT: mov r23, r15 -; NONGNU-NEXT: mov r24, r16 -; NONGNU-NEXT: mov r25, r17 -; NONGNU-NEXT: rcall cos -; NONGNU-NEXT: mov r18, r12 -; NONGNU-NEXT: mov r19, r13 -; NONGNU-NEXT: mov r20, r10 -; NONGNU-NEXT: mov r21, r11 -; NONGNU-NEXT: pop r17 -; NONGNU-NEXT: pop r16 -; NONGNU-NEXT: pop r15 -; NONGNU-NEXT: pop r14 -; NONGNU-NEXT: pop r13 -; NONGNU-NEXT: pop r12 -; NONGNU-NEXT: pop r11 -; NONGNU-NEXT: pop r10 -; NONGNU-NEXT: ret -; -; GNU-LABEL: test_sincos_f32: -; GNU: ; %bb.0: -; GNU-NEXT: push r28 -; GNU-NEXT: push r29 -; GNU-NEXT: in r28, 61 -; GNU-NEXT: in r29, 62 -; GNU-NEXT: sbiw r28, 8 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: mov r20, r28 -; GNU-NEXT: mov r21, r29 -; GNU-NEXT: subi r20, 251 -; GNU-NEXT: sbci r21, 255 -; GNU-NEXT: mov r18, r28 -; GNU-NEXT: mov r19, r29 -; GNU-NEXT: subi r18, 255 -; GNU-NEXT: sbci r19, 255 -; 
GNU-NEXT: rcall sincosf -; GNU-NEXT: ldd r18, Y+5 -; GNU-NEXT: ldd r19, Y+6 -; GNU-NEXT: ldd r20, Y+7 -; GNU-NEXT: ldd r21, Y+8 -; GNU-NEXT: ldd r22, Y+1 -; GNU-NEXT: ldd r23, Y+2 -; GNU-NEXT: ldd r24, Y+3 -; GNU-NEXT: ldd r25, Y+4 -; GNU-NEXT: adiw r28, 8 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: pop r29 -; GNU-NEXT: pop r28 -; GNU-NEXT: ret +; CHECK-LABEL: test_sincos_f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r10 +; CHECK-NEXT: push r11 +; CHECK-NEXT: push r12 +; CHECK-NEXT: push r13 +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: mov r16, r24 +; CHECK-NEXT: mov r17, r25 +; CHECK-NEXT: mov r14, r22 +; CHECK-NEXT: mov r15, r23 +; CHECK-NEXT: rcall sin +; CHECK-NEXT: mov r12, r22 +; CHECK-NEXT: mov r13, r23 +; CHECK-NEXT: mov r10, r24 +; CHECK-NEXT: mov r11, r25 +; CHECK-NEXT: mov r22, r14 +; CHECK-NEXT: mov r23, r15 +; CHECK-NEXT: mov r24, r16 +; CHECK-NEXT: mov r25, r17 +; CHECK-NEXT: rcall cos +; CHECK-NEXT: mov r18, r12 +; CHECK-NEXT: mov r19, r13 +; CHECK-NEXT: mov r20, r10 +; CHECK-NEXT: mov r21, r11 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: pop r13 +; CHECK-NEXT: pop r12 +; CHECK-NEXT: pop r11 +; CHECK-NEXT: pop r10 +; CHECK-NEXT: ret %result = call { float, float } @llvm.sincos.f32(float %a) ret { float, float } %result } define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 { -; NONGNU-LABEL: test_sincos_v2f32: -; NONGNU: ; %bb.0: -; NONGNU-NEXT: push r8 -; NONGNU-NEXT: push r9 -; NONGNU-NEXT: push r10 -; NONGNU-NEXT: push r11 -; NONGNU-NEXT: push r12 -; NONGNU-NEXT: push r13 -; NONGNU-NEXT: push r14 -; NONGNU-NEXT: push r15 -; NONGNU-NEXT: mov r14, r22 -; NONGNU-NEXT: mov r15, r23 -; NONGNU-NEXT: mov r12, r20 -; NONGNU-NEXT: mov r13, r21 -; NONGNU-NEXT: mov r10, r18 -; NONGNU-NEXT: mov r11, r19 -; NONGNU-NEXT: mov r8, r24 -; NONGNU-NEXT: mov r9, r25 -; NONGNU-NEXT: mov r22, r12 -; NONGNU-NEXT: mov r23, r13 -; NONGNU-NEXT: mov r24, r14 -; NONGNU-NEXT: mov r25, r15 -; NONGNU-NEXT: rcall cos -; NONGNU-NEXT: mov r30, r8 -; NONGNU-NEXT: mov r31, r9 -; NONGNU-NEXT: std Z+15, r25 -; NONGNU-NEXT: std Z+14, r24 -; NONGNU-NEXT: std Z+13, r23 -; NONGNU-NEXT: std Z+12, r22 -; NONGNU-NEXT: mov r22, r16 -; NONGNU-NEXT: mov r23, r17 -; NONGNU-NEXT: mov r24, r10 -; NONGNU-NEXT: mov r25, r11 -; NONGNU-NEXT: rcall cos -; NONGNU-NEXT: mov r30, r8 -; NONGNU-NEXT: mov r31, r9 -; NONGNU-NEXT: std Z+11, r25 -; NONGNU-NEXT: std Z+10, r24 -; NONGNU-NEXT: std Z+9, r23 -; NONGNU-NEXT: std Z+8, r22 -; NONGNU-NEXT: mov r22, r12 -; NONGNU-NEXT: mov r23, r13 -; NONGNU-NEXT: mov r24, r14 -; NONGNU-NEXT: mov r25, r15 -; NONGNU-NEXT: rcall sin -; NONGNU-NEXT: mov r30, r8 -; NONGNU-NEXT: mov r31, r9 -; NONGNU-NEXT: std Z+7, r25 -; NONGNU-NEXT: std Z+6, r24 -; NONGNU-NEXT: std Z+5, r23 -; NONGNU-NEXT: std Z+4, r22 -; NONGNU-NEXT: mov r22, r16 -; NONGNU-NEXT: mov r23, r17 -; NONGNU-NEXT: mov r24, r10 -; NONGNU-NEXT: mov r25, r11 -; NONGNU-NEXT: rcall sin -; NONGNU-NEXT: mov r30, r8 -; NONGNU-NEXT: mov r31, r9 -; NONGNU-NEXT: std Z+3, r25 -; NONGNU-NEXT: std Z+2, r24 -; NONGNU-NEXT: std Z+1, r23 -; NONGNU-NEXT: st Z, r22 -; NONGNU-NEXT: pop r15 -; NONGNU-NEXT: pop r14 -; NONGNU-NEXT: pop r13 -; NONGNU-NEXT: pop r12 -; NONGNU-NEXT: pop r11 -; NONGNU-NEXT: pop r10 -; NONGNU-NEXT: pop r9 -; NONGNU-NEXT: pop r8 -; NONGNU-NEXT: ret -; -; GNU-LABEL: test_sincos_v2f32: -; GNU: ; %bb.0: -; 
GNU-NEXT: push r12 -; GNU-NEXT: push r13 -; GNU-NEXT: push r14 -; GNU-NEXT: push r15 -; GNU-NEXT: push r28 -; GNU-NEXT: push r29 -; GNU-NEXT: in r28, 61 -; GNU-NEXT: in r29, 62 -; GNU-NEXT: sbiw r28, 16 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: mov r30, r22 -; GNU-NEXT: mov r31, r23 -; GNU-NEXT: mov r14, r18 -; GNU-NEXT: mov r15, r19 -; GNU-NEXT: mov r12, r24 -; GNU-NEXT: mov r13, r25 -; GNU-NEXT: mov r26, r28 -; GNU-NEXT: mov r27, r29 -; GNU-NEXT: adiw r26, 13 -; GNU-NEXT: mov r18, r28 -; GNU-NEXT: mov r19, r29 -; GNU-NEXT: subi r18, 247 -; GNU-NEXT: sbci r19, 255 -; GNU-NEXT: mov r22, r20 -; GNU-NEXT: mov r23, r21 -; GNU-NEXT: mov r24, r30 -; GNU-NEXT: mov r25, r31 -; GNU-NEXT: mov r20, r26 -; GNU-NEXT: mov r21, r27 -; GNU-NEXT: rcall sincosf -; GNU-NEXT: mov r20, r28 -; GNU-NEXT: mov r21, r29 -; GNU-NEXT: subi r20, 251 -; GNU-NEXT: sbci r21, 255 -; GNU-NEXT: mov r18, r28 -; GNU-NEXT: mov r19, r29 -; GNU-NEXT: subi r18, 255 -; GNU-NEXT: sbci r19, 255 -; GNU-NEXT: mov r22, r16 -; GNU-NEXT: mov r23, r17 -; GNU-NEXT: mov r24, r14 -; GNU-NEXT: mov r25, r15 -; GNU-NEXT: rcall sincosf -; GNU-NEXT: ldd r24, Y+11 -; GNU-NEXT: ldd r25, Y+12 -; GNU-NEXT: mov r30, r12 -; GNU-NEXT: mov r31, r13 -; GNU-NEXT: std Z+15, r25 -; GNU-NEXT: std Z+14, r24 -; GNU-NEXT: ldd r24, Y+9 -; GNU-NEXT: ldd r25, Y+10 -; GNU-NEXT: std Z+13, r25 -; GNU-NEXT: std Z+12, r24 -; GNU-NEXT: ldd r24, Y+3 -; GNU-NEXT: ldd r25, Y+4 -; GNU-NEXT: std Z+11, r25 -; GNU-NEXT: std Z+10, r24 -; GNU-NEXT: ldd r24, Y+1 -; GNU-NEXT: ldd r25, Y+2 -; GNU-NEXT: std Z+9, r25 -; GNU-NEXT: std Z+8, r24 -; GNU-NEXT: ldd r24, Y+15 -; GNU-NEXT: ldd r25, Y+16 -; GNU-NEXT: std Z+7, r25 -; GNU-NEXT: std Z+6, r24 -; GNU-NEXT: ldd r24, Y+13 -; GNU-NEXT: ldd r25, Y+14 -; GNU-NEXT: std Z+5, r25 -; GNU-NEXT: std Z+4, r24 -; GNU-NEXT: ldd r24, Y+7 -; GNU-NEXT: ldd r25, Y+8 -; GNU-NEXT: std Z+3, r25 -; GNU-NEXT: std Z+2, r24 -; GNU-NEXT: ldd r24, Y+5 -; GNU-NEXT: ldd r25, Y+6 -; GNU-NEXT: std Z+1, r25 -; GNU-NEXT: st Z, r24 -; GNU-NEXT: adiw r28, 16 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: pop r29 -; GNU-NEXT: pop r28 -; GNU-NEXT: pop r15 -; GNU-NEXT: pop r14 -; GNU-NEXT: pop r13 -; GNU-NEXT: pop r12 -; GNU-NEXT: ret +; CHECK-LABEL: test_sincos_v2f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r8 +; CHECK-NEXT: push r9 +; CHECK-NEXT: push r10 +; CHECK-NEXT: push r11 +; CHECK-NEXT: push r12 +; CHECK-NEXT: push r13 +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: mov r14, r22 +; CHECK-NEXT: mov r15, r23 +; CHECK-NEXT: mov r12, r20 +; CHECK-NEXT: mov r13, r21 +; CHECK-NEXT: mov r10, r18 +; CHECK-NEXT: mov r11, r19 +; CHECK-NEXT: mov r8, r24 +; CHECK-NEXT: mov r9, r25 +; CHECK-NEXT: mov r22, r12 +; CHECK-NEXT: mov r23, r13 +; CHECK-NEXT: mov r24, r14 +; CHECK-NEXT: mov r25, r15 +; CHECK-NEXT: rcall cos +; CHECK-NEXT: mov r30, r8 +; CHECK-NEXT: mov r31, r9 +; CHECK-NEXT: std Z+15, r25 +; CHECK-NEXT: std Z+14, r24 +; CHECK-NEXT: std Z+13, r23 +; CHECK-NEXT: std Z+12, r22 +; CHECK-NEXT: mov r22, r16 +; CHECK-NEXT: mov r23, r17 +; CHECK-NEXT: mov r24, r10 +; CHECK-NEXT: mov r25, r11 +; CHECK-NEXT: rcall cos +; CHECK-NEXT: mov r30, r8 +; CHECK-NEXT: mov r31, r9 +; CHECK-NEXT: std Z+11, r25 +; CHECK-NEXT: std Z+10, r24 +; CHECK-NEXT: std Z+9, r23 +; CHECK-NEXT: std Z+8, r22 +; CHECK-NEXT: mov r22, r12 +; CHECK-NEXT: mov r23, r13 +; CHECK-NEXT: mov r24, r14 +; CHECK-NEXT: mov r25, r15 +; 
CHECK-NEXT: rcall sin +; CHECK-NEXT: mov r30, r8 +; CHECK-NEXT: mov r31, r9 +; CHECK-NEXT: std Z+7, r25 +; CHECK-NEXT: std Z+6, r24 +; CHECK-NEXT: std Z+5, r23 +; CHECK-NEXT: std Z+4, r22 +; CHECK-NEXT: mov r22, r16 +; CHECK-NEXT: mov r23, r17 +; CHECK-NEXT: mov r24, r10 +; CHECK-NEXT: mov r25, r11 +; CHECK-NEXT: rcall sin +; CHECK-NEXT: mov r30, r8 +; CHECK-NEXT: mov r31, r9 +; CHECK-NEXT: std Z+3, r25 +; CHECK-NEXT: std Z+2, r24 +; CHECK-NEXT: std Z+1, r23 +; CHECK-NEXT: st Z, r22 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: pop r13 +; CHECK-NEXT: pop r12 +; CHECK-NEXT: pop r11 +; CHECK-NEXT: pop r10 +; CHECK-NEXT: pop r9 +; CHECK-NEXT: pop r8 +; CHECK-NEXT: ret %result = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> %a) ret { <2 x float>, <2 x float> } %result } @@ -644,235 +280,127 @@ define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 { ; } define { fp128, fp128 } @test_sincos_f128(fp128 %a) #0 { -; NONGNU-LABEL: test_sincos_f128: -; NONGNU: ; %bb.0: -; NONGNU-NEXT: push r2 -; NONGNU-NEXT: push r3 -; NONGNU-NEXT: push r4 -; NONGNU-NEXT: push r5 -; NONGNU-NEXT: push r6 -; NONGNU-NEXT: push r7 -; NONGNU-NEXT: push r28 -; NONGNU-NEXT: push r29 -; NONGNU-NEXT: in r28, 61 -; NONGNU-NEXT: in r29, 62 -; NONGNU-NEXT: sbiw r28, 34 -; NONGNU-NEXT: in r0, 63 -; NONGNU-NEXT: cli -; NONGNU-NEXT: out 62, r29 -; NONGNU-NEXT: out 63, r0 -; NONGNU-NEXT: out 61, r28 -; NONGNU-NEXT: std Y+2, r23 ; 2-byte Folded Spill -; NONGNU-NEXT: std Y+1, r22 ; 2-byte Folded Spill -; NONGNU-NEXT: mov r2, r20 -; NONGNU-NEXT: mov r3, r21 -; NONGNU-NEXT: mov r4, r18 -; NONGNU-NEXT: mov r5, r19 -; NONGNU-NEXT: mov r6, r24 -; NONGNU-NEXT: mov r7, r25 -; NONGNU-NEXT: mov r24, r28 -; NONGNU-NEXT: mov r25, r29 -; NONGNU-NEXT: adiw r24, 3 -; NONGNU-NEXT: rcall cosl -; NONGNU-NEXT: mov r24, r28 -; NONGNU-NEXT: mov r25, r29 -; NONGNU-NEXT: adiw r24, 19 -; NONGNU-NEXT: mov r18, r4 -; NONGNU-NEXT: mov r19, r5 -; NONGNU-NEXT: mov r20, r2 -; NONGNU-NEXT: mov r21, r3 -; NONGNU-NEXT: ldd r22, Y+1 ; 2-byte Folded Reload -; NONGNU-NEXT: ldd r23, Y+2 ; 2-byte Folded Reload -; NONGNU-NEXT: rcall sinl -; NONGNU-NEXT: ldd r24, Y+17 -; NONGNU-NEXT: ldd r25, Y+18 -; NONGNU-NEXT: mov r30, r6 -; NONGNU-NEXT: mov r31, r7 -; NONGNU-NEXT: std Z+31, r25 -; NONGNU-NEXT: std Z+30, r24 -; NONGNU-NEXT: ldd r24, Y+15 -; NONGNU-NEXT: ldd r25, Y+16 -; NONGNU-NEXT: std Z+29, r25 -; NONGNU-NEXT: std Z+28, r24 -; NONGNU-NEXT: ldd r24, Y+13 -; NONGNU-NEXT: ldd r25, Y+14 -; NONGNU-NEXT: std Z+27, r25 -; NONGNU-NEXT: std Z+26, r24 -; NONGNU-NEXT: ldd r24, Y+11 -; NONGNU-NEXT: ldd r25, Y+12 -; NONGNU-NEXT: std Z+25, r25 -; NONGNU-NEXT: std Z+24, r24 -; NONGNU-NEXT: ldd r24, Y+9 -; NONGNU-NEXT: ldd r25, Y+10 -; NONGNU-NEXT: std Z+23, r25 -; NONGNU-NEXT: std Z+22, r24 -; NONGNU-NEXT: ldd r24, Y+7 -; NONGNU-NEXT: ldd r25, Y+8 -; NONGNU-NEXT: std Z+21, r25 -; NONGNU-NEXT: std Z+20, r24 -; NONGNU-NEXT: ldd r24, Y+5 -; NONGNU-NEXT: ldd r25, Y+6 -; NONGNU-NEXT: std Z+19, r25 -; NONGNU-NEXT: std Z+18, r24 -; NONGNU-NEXT: ldd r24, Y+3 -; NONGNU-NEXT: ldd r25, Y+4 -; NONGNU-NEXT: std Z+17, r25 -; NONGNU-NEXT: std Z+16, r24 -; NONGNU-NEXT: ldd r24, Y+33 -; NONGNU-NEXT: ldd r25, Y+34 -; NONGNU-NEXT: std Z+15, r25 -; NONGNU-NEXT: std Z+14, r24 -; NONGNU-NEXT: ldd r24, Y+31 -; NONGNU-NEXT: ldd r25, Y+32 -; NONGNU-NEXT: std Z+13, r25 -; NONGNU-NEXT: std Z+12, r24 -; NONGNU-NEXT: ldd r24, Y+29 -; NONGNU-NEXT: ldd r25, Y+30 -; NONGNU-NEXT: std Z+11, r25 -; NONGNU-NEXT: std Z+10, r24 -; NONGNU-NEXT: ldd r24, Y+27 -; 
NONGNU-NEXT: ldd r25, Y+28 -; NONGNU-NEXT: std Z+9, r25 -; NONGNU-NEXT: std Z+8, r24 -; NONGNU-NEXT: ldd r24, Y+25 -; NONGNU-NEXT: ldd r25, Y+26 -; NONGNU-NEXT: std Z+7, r25 -; NONGNU-NEXT: std Z+6, r24 -; NONGNU-NEXT: ldd r24, Y+23 -; NONGNU-NEXT: ldd r25, Y+24 -; NONGNU-NEXT: std Z+5, r25 -; NONGNU-NEXT: std Z+4, r24 -; NONGNU-NEXT: ldd r24, Y+21 -; NONGNU-NEXT: ldd r25, Y+22 -; NONGNU-NEXT: std Z+3, r25 -; NONGNU-NEXT: std Z+2, r24 -; NONGNU-NEXT: ldd r24, Y+19 -; NONGNU-NEXT: ldd r25, Y+20 -; NONGNU-NEXT: std Z+1, r25 -; NONGNU-NEXT: st Z, r24 -; NONGNU-NEXT: adiw r28, 34 -; NONGNU-NEXT: in r0, 63 -; NONGNU-NEXT: cli -; NONGNU-NEXT: out 62, r29 -; NONGNU-NEXT: out 63, r0 -; NONGNU-NEXT: out 61, r28 -; NONGNU-NEXT: pop r29 -; NONGNU-NEXT: pop r28 -; NONGNU-NEXT: pop r7 -; NONGNU-NEXT: pop r6 -; NONGNU-NEXT: pop r5 -; NONGNU-NEXT: pop r4 -; NONGNU-NEXT: pop r3 -; NONGNU-NEXT: pop r2 -; NONGNU-NEXT: ret -; -; GNU-LABEL: test_sincos_f128: -; GNU: ; %bb.0: -; GNU-NEXT: push r6 -; GNU-NEXT: push r7 -; GNU-NEXT: push r28 -; GNU-NEXT: push r29 -; GNU-NEXT: in r28, 61 -; GNU-NEXT: in r29, 62 -; GNU-NEXT: sbiw r28, 52 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 61, r28 -; GNU-NEXT: mov r6, r24 -; GNU-NEXT: mov r7, r25 -; GNU-NEXT: mov r24, r28 -; GNU-NEXT: mov r25, r29 -; GNU-NEXT: adiw r24, 21 -; GNU-NEXT: std Y+4, r25 -; GNU-NEXT: std Y+3, r24 -; GNU-NEXT: mov r24, r28 -; GNU-NEXT: mov r25, r29 -; GNU-NEXT: adiw r24, 37 -; GNU-NEXT: std Y+2, r25 -; GNU-NEXT: std Y+1, r24 -; GNU-NEXT: mov r24, r28 -; GNU-NEXT: mov r25, r29 -; GNU-NEXT: adiw r24, 5 -; GNU-NEXT: rcall sincosl -; GNU-NEXT: ldd r24, Y+35 -; GNU-NEXT: ldd r25, Y+36 -; GNU-NEXT: mov r30, r6 -; GNU-NEXT: mov r31, r7 -; GNU-NEXT: std Z+31, r25 -; GNU-NEXT: std Z+30, r24 -; GNU-NEXT: ldd r24, Y+33 -; GNU-NEXT: ldd r25, Y+34 -; GNU-NEXT: std Z+29, r25 -; GNU-NEXT: std Z+28, r24 -; GNU-NEXT: ldd r24, Y+31 -; GNU-NEXT: ldd r25, Y+32 -; GNU-NEXT: std Z+27, r25 -; GNU-NEXT: std Z+26, r24 -; GNU-NEXT: ldd r24, Y+29 -; GNU-NEXT: ldd r25, Y+30 -; GNU-NEXT: std Z+25, r25 -; GNU-NEXT: std Z+24, r24 -; GNU-NEXT: ldd r24, Y+27 -; GNU-NEXT: ldd r25, Y+28 -; GNU-NEXT: std Z+23, r25 -; GNU-NEXT: std Z+22, r24 -; GNU-NEXT: ldd r24, Y+25 -; GNU-NEXT: ldd r25, Y+26 -; GNU-NEXT: std Z+21, r25 -; GNU-NEXT: std Z+20, r24 -; GNU-NEXT: ldd r24, Y+23 -; GNU-NEXT: ldd r25, Y+24 -; GNU-NEXT: std Z+19, r25 -; GNU-NEXT: std Z+18, r24 -; GNU-NEXT: ldd r24, Y+21 -; GNU-NEXT: ldd r25, Y+22 -; GNU-NEXT: std Z+17, r25 -; GNU-NEXT: std Z+16, r24 -; GNU-NEXT: ldd r24, Y+51 -; GNU-NEXT: ldd r25, Y+52 -; GNU-NEXT: std Z+15, r25 -; GNU-NEXT: std Z+14, r24 -; GNU-NEXT: ldd r24, Y+49 -; GNU-NEXT: ldd r25, Y+50 -; GNU-NEXT: std Z+13, r25 -; GNU-NEXT: std Z+12, r24 -; GNU-NEXT: ldd r24, Y+47 -; GNU-NEXT: ldd r25, Y+48 -; GNU-NEXT: std Z+11, r25 -; GNU-NEXT: std Z+10, r24 -; GNU-NEXT: ldd r24, Y+45 -; GNU-NEXT: ldd r25, Y+46 -; GNU-NEXT: std Z+9, r25 -; GNU-NEXT: std Z+8, r24 -; GNU-NEXT: ldd r24, Y+43 -; GNU-NEXT: ldd r25, Y+44 -; GNU-NEXT: std Z+7, r25 -; GNU-NEXT: std Z+6, r24 -; GNU-NEXT: ldd r24, Y+41 -; GNU-NEXT: ldd r25, Y+42 -; GNU-NEXT: std Z+5, r25 -; GNU-NEXT: std Z+4, r24 -; GNU-NEXT: ldd r24, Y+39 -; GNU-NEXT: ldd r25, Y+40 -; GNU-NEXT: std Z+3, r25 -; GNU-NEXT: std Z+2, r24 -; GNU-NEXT: ldd r24, Y+37 -; GNU-NEXT: ldd r25, Y+38 -; GNU-NEXT: std Z+1, r25 -; GNU-NEXT: st Z, r24 -; GNU-NEXT: adiw r28, 52 -; GNU-NEXT: in r0, 63 -; GNU-NEXT: cli -; GNU-NEXT: out 62, r29 -; GNU-NEXT: out 63, r0 -; GNU-NEXT: out 
61, r28 -; GNU-NEXT: pop r29 -; GNU-NEXT: pop r28 -; GNU-NEXT: pop r7 -; GNU-NEXT: pop r6 -; GNU-NEXT: ret +; CHECK-LABEL: test_sincos_f128: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r2 +; CHECK-NEXT: push r3 +; CHECK-NEXT: push r4 +; CHECK-NEXT: push r5 +; CHECK-NEXT: push r6 +; CHECK-NEXT: push r7 +; CHECK-NEXT: push r28 +; CHECK-NEXT: push r29 +; CHECK-NEXT: in r28, 61 +; CHECK-NEXT: in r29, 62 +; CHECK-NEXT: sbiw r28, 34 +; CHECK-NEXT: in r0, 63 +; CHECK-NEXT: cli +; CHECK-NEXT: out 62, r29 +; CHECK-NEXT: out 63, r0 +; CHECK-NEXT: out 61, r28 +; CHECK-NEXT: std Y+2, r23 ; 2-byte Folded Spill +; CHECK-NEXT: std Y+1, r22 ; 2-byte Folded Spill +; CHECK-NEXT: mov r2, r20 +; CHECK-NEXT: mov r3, r21 +; CHECK-NEXT: mov r4, r18 +; CHECK-NEXT: mov r5, r19 +; CHECK-NEXT: mov r6, r24 +; CHECK-NEXT: mov r7, r25 +; CHECK-NEXT: mov r24, r28 +; CHECK-NEXT: mov r25, r29 +; CHECK-NEXT: adiw r24, 3 +; CHECK-NEXT: rcall cosl +; CHECK-NEXT: mov r24, r28 +; CHECK-NEXT: mov r25, r29 +; CHECK-NEXT: adiw r24, 19 +; CHECK-NEXT: mov r18, r4 +; CHECK-NEXT: mov r19, r5 +; CHECK-NEXT: mov r20, r2 +; CHECK-NEXT: mov r21, r3 +; CHECK-NEXT: ldd r22, Y+1 ; 2-byte Folded Reload +; CHECK-NEXT: ldd r23, Y+2 ; 2-byte Folded Reload +; CHECK-NEXT: rcall sinl +; CHECK-NEXT: ldd r24, Y+17 +; CHECK-NEXT: ldd r25, Y+18 +; CHECK-NEXT: mov r30, r6 +; CHECK-NEXT: mov r31, r7 +; CHECK-NEXT: std Z+31, r25 +; CHECK-NEXT: std Z+30, r24 +; CHECK-NEXT: ldd r24, Y+15 +; CHECK-NEXT: ldd r25, Y+16 +; CHECK-NEXT: std Z+29, r25 +; CHECK-NEXT: std Z+28, r24 +; CHECK-NEXT: ldd r24, Y+13 +; CHECK-NEXT: ldd r25, Y+14 +; CHECK-NEXT: std Z+27, r25 +; CHECK-NEXT: std Z+26, r24 +; CHECK-NEXT: ldd r24, Y+11 +; CHECK-NEXT: ldd r25, Y+12 +; CHECK-NEXT: std Z+25, r25 +; CHECK-NEXT: std Z+24, r24 +; CHECK-NEXT: ldd r24, Y+9 +; CHECK-NEXT: ldd r25, Y+10 +; CHECK-NEXT: std Z+23, r25 +; CHECK-NEXT: std Z+22, r24 +; CHECK-NEXT: ldd r24, Y+7 +; CHECK-NEXT: ldd r25, Y+8 +; CHECK-NEXT: std Z+21, r25 +; CHECK-NEXT: std Z+20, r24 +; CHECK-NEXT: ldd r24, Y+5 +; CHECK-NEXT: ldd r25, Y+6 +; CHECK-NEXT: std Z+19, r25 +; CHECK-NEXT: std Z+18, r24 +; CHECK-NEXT: ldd r24, Y+3 +; CHECK-NEXT: ldd r25, Y+4 +; CHECK-NEXT: std Z+17, r25 +; CHECK-NEXT: std Z+16, r24 +; CHECK-NEXT: ldd r24, Y+33 +; CHECK-NEXT: ldd r25, Y+34 +; CHECK-NEXT: std Z+15, r25 +; CHECK-NEXT: std Z+14, r24 +; CHECK-NEXT: ldd r24, Y+31 +; CHECK-NEXT: ldd r25, Y+32 +; CHECK-NEXT: std Z+13, r25 +; CHECK-NEXT: std Z+12, r24 +; CHECK-NEXT: ldd r24, Y+29 +; CHECK-NEXT: ldd r25, Y+30 +; CHECK-NEXT: std Z+11, r25 +; CHECK-NEXT: std Z+10, r24 +; CHECK-NEXT: ldd r24, Y+27 +; CHECK-NEXT: ldd r25, Y+28 +; CHECK-NEXT: std Z+9, r25 +; CHECK-NEXT: std Z+8, r24 +; CHECK-NEXT: ldd r24, Y+25 +; CHECK-NEXT: ldd r25, Y+26 +; CHECK-NEXT: std Z+7, r25 +; CHECK-NEXT: std Z+6, r24 +; CHECK-NEXT: ldd r24, Y+23 +; CHECK-NEXT: ldd r25, Y+24 +; CHECK-NEXT: std Z+5, r25 +; CHECK-NEXT: std Z+4, r24 +; CHECK-NEXT: ldd r24, Y+21 +; CHECK-NEXT: ldd r25, Y+22 +; CHECK-NEXT: std Z+3, r25 +; CHECK-NEXT: std Z+2, r24 +; CHECK-NEXT: ldd r24, Y+19 +; CHECK-NEXT: ldd r25, Y+20 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: adiw r28, 34 +; CHECK-NEXT: in r0, 63 +; CHECK-NEXT: cli +; CHECK-NEXT: out 62, r29 +; CHECK-NEXT: out 63, r0 +; CHECK-NEXT: out 61, r28 +; CHECK-NEXT: pop r29 +; CHECK-NEXT: pop r28 +; CHECK-NEXT: pop r7 +; CHECK-NEXT: pop r6 +; CHECK-NEXT: pop r5 +; CHECK-NEXT: pop r4 +; CHECK-NEXT: pop r3 +; CHECK-NEXT: pop r2 +; CHECK-NEXT: ret %result = call { fp128, fp128 } @llvm.sincos.f128(fp128 %a) ret { fp128, 
fp128 } %result } @@ -880,4 +408,5 @@ define { fp128, fp128 } @test_sincos_f128(fp128 %a) #0 { attributes #0 = { nounwind } ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: -; CHECK: {{.*}} +; GNU: {{.*}} +; NONGNU: {{.*}} diff --git a/llvm/test/CodeGen/Generic/allow-check.ll b/llvm/test/CodeGen/Generic/allow-check.ll index 148ee81..97719a7 100644 --- a/llvm/test/CodeGen/Generic/allow-check.ll +++ b/llvm/test/CodeGen/Generic/allow-check.ll @@ -6,6 +6,7 @@ ; XFAIL: target=nvptx{{.*}} ; XFAIL: target=sparc{{.*}} ; XFAIL: target=hexagon-{{.*}} +; XFAIL: target=arm64ec-{{.*}} ; RUN: llc < %s -O3 -global-isel=0 -fast-isel=0 ; RUN: llc < %s -O3 -global-isel=1 -fast-isel=0 diff --git a/llvm/test/CodeGen/Generic/fp128-exp10-libcall.ll b/llvm/test/CodeGen/Generic/fp128-exp10-libcall.ll new file mode 100644 index 0000000..5e97f03 --- /dev/null +++ b/llvm/test/CodeGen/Generic/fp128-exp10-libcall.ll @@ -0,0 +1,28 @@ +; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-USELD %} +; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64-unknown-linux-musl | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-USELD %} +; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64-unknown-none | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-USELD %} +; RUN: %if aarch64-registered-target %{ not llc -mtriple=arm64-apple-macosx -filetype=null %s 2>&1 | FileCheck --check-prefix=ERR %s %} +; RUN: %if arm-registered-target %{ llc < %s -mtriple=arm-none-eabi | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-USELD %} +; RUN: %if arm-registered-target %{ llc < %s -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-USELD %} +; RUN: %if powerpc-registered-target %{ llc < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-F128 %} +; RUN: %if powerpc-registered-target %{ llc < %s -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-F128 %} +; RUN: %if powerpc-registered-target %{ llc < %s -mtriple=powerpc64-unknown-linux-musl | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-F128 %} +; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-USELD %} +; RUN: %if systemz-registered-target %{ llc < %s -mtriple=s390x-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-S390X %} +; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-F128 %} +; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-unknown-linux-musl | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-USELD %} +; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-F128 %} +; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-unknown-linux-musl | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-USELD %} +; RUN %if x86-registered-target %{ llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-F128 %} + +; ERR: error: no libcall available for fexp10 +define fp128 @test_exp10(fp128 %a) { +; CHECK-ALL-LABEL: test_exp10: +; CHECK-F128: exp10f128 +; CHECK-USELD: exp10l +; CHECK-S390X: exp10l +start: + %0 = tail call fp128 @llvm.exp10.f128(fp128 %a) + ret fp128 %0 +} + diff --git a/llvm/test/CodeGen/Generic/fp128-math-libcalls.ll 
b/llvm/test/CodeGen/Generic/fp128-math-libcalls.ll index ccce4bbd..f759c94 100644 --- a/llvm/test/CodeGen/Generic/fp128-math-libcalls.ll +++ b/llvm/test/CodeGen/Generic/fp128-math-libcalls.ll @@ -95,16 +95,6 @@ start: ret fp128 %0 } -define fp128 @test_exp10(fp128 %a) { -; CHECK-ALL-LABEL: test_exp10: -; CHECK-F128: exp10f128 -; CHECK-USELD: exp10l -; CHECK-S390X: exp10l -start: - %0 = tail call fp128 @llvm.exp10.f128(fp128 %a) - ret fp128 %0 -} - define fp128 @test_exp2(fp128 %a) { ; CHECK-ALL-LABEL: test_exp2: ; CHECK-F128: exp2f128 diff --git a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll index 3800712..f0277a7 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll @@ -11,16 +11,16 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill ; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 1 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 1 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 ; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 ; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 0 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 0 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -29,8 +29,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1 ; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 2 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 2 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -39,8 +39,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 2 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 3 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -49,8 +49,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 3 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 4 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 4 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -59,8 +59,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 4 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 5 -; 
CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 5 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -69,8 +69,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 5 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 6 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 6 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -79,8 +79,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 7 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -107,16 +107,16 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind { ; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill ; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 -; CHECK-NEXT: movgr2fr.d $fa0, $a0 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 1 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 ; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 ; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: movgr2fr.d $fa0, $a0 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -125,8 +125,8 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 ; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2 -; CHECK-NEXT: movgr2fr.d $fa0, $a0 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 2 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -135,8 +135,8 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2 ; CHECK-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: movgr2fr.d $fa0, $a0 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 3 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll index 221aba3..8ee567c 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll +++ 
b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll @@ -6,12 +6,12 @@ define <4 x double> @shufflevector_v4f64(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: shufflevector_v4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.d $a0, $xr1, 2 -; CHECK-NEXT: xvpickve2gr.d $a1, $xr0, 3 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 2 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr1, 3 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 3 +; CHECK-NEXT: xvpickve.d $xr2, $xr1, 2 +; CHECK-NEXT: xvpickve.d $xr3, $xr0, 3 +; CHECK-NEXT: xvinsve0.d $xr0, $xr2, 1 +; CHECK-NEXT: xvinsve0.d $xr0, $xr3, 2 +; CHECK-NEXT: xvpickve.d $xr1, $xr1, 3 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 3 ; CHECK-NEXT: ret entry: %c = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 6, i32 3, i32 7> diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll index 271e3ec..ac5a214 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll @@ -42,8 +42,8 @@ entry: define <8 x float> @insert_extract_v8f32(<8 x float> %a) nounwind { ; CHECK-LABEL: insert_extract_v8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 1 +; CHECK-NEXT: xvpickve.w $xr1, $xr0, 7 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1 ; CHECK-NEXT: ret entry: %b = extractelement <8 x float> %a, i32 7 @@ -66,8 +66,8 @@ entry: define <4 x double> @insert_extract_v4f64(<4 x double> %a) nounwind { ; CHECK-LABEL: insert_extract_v4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1 +; CHECK-NEXT: xvpickve.d $xr1, $xr0, 3 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 ; CHECK-NEXT: ret entry: %b = extractelement <4 x double> %a, i32 3 diff --git a/llvm/test/CodeGen/NVPTX/combine-min-max.ll b/llvm/test/CodeGen/NVPTX/combine-min-max.ll index 157c3cc..e7140ab 100644 --- a/llvm/test/CodeGen/NVPTX/combine-min-max.ll +++ b/llvm/test/CodeGen/NVPTX/combine-min-max.ll @@ -1,22 +1,37 @@ -; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 -O2 | FileCheck %s -; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 -O2 | %ptxas-verify %} +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_90 -mattr=+ptx80 -O3 | FileCheck %s --check-prefixes=CHECK,SM90 +; RUN: llc < %s -mcpu=sm_20 -O3 | FileCheck %s --check-prefixes=CHECK,SM20 +; RUN: %if ptxas-12.0 %{ llc < %s -mcpu=sm_90 -mattr=+ptx80 -O3 | %ptxas-verify -arch=sm_90 %} +; RUN: %if ptxas-12.0 %{ llc < %s -mcpu=sm_20 -O3 | %ptxas-verify %} + +target triple = "nvptx64-nvidia-cuda" ; ************************************* ; * Cases with no min/max define i32 @ab_eq_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_eq_i32 -; CHECK-NOT: min -; CHECK-NOT: max +; CHECK-LABEL: ab_eq_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_eq_i32_param_1]; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp eq i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel } define i64 @ab_ne_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_ne_i64 -; CHECK-NOT: min -; CHECK-NOT: max +; CHECK-LABEL: ab_ne_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 
%rd1, [ab_ne_i64_param_1]; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-NEXT: ret; %cmp = icmp ne i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a ret i64 %sel @@ -27,32 +42,72 @@ define i64 @ab_ne_i64(i64 %a, i64 %b) { ; *** ab, unsigned, i16 define i16 @ab_ugt_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ab_ugt_i16 -; CHECK: max.u16 +; CHECK-LABEL: ab_ugt_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ab_ugt_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ab_ugt_i16_param_1]; +; CHECK-NEXT: max.u16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp ugt i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b ret i16 %sel } define i16 @ab_uge_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ab_uge_i16 -; CHECK: max.u16 +; CHECK-LABEL: ab_uge_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ab_uge_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ab_uge_i16_param_1]; +; CHECK-NEXT: max.u16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp uge i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b ret i16 %sel } define i16 @ab_ult_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ab_ult_i16 -; CHECK: min.u16 +; CHECK-LABEL: ab_ult_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ab_ult_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ab_ult_i16_param_1]; +; CHECK-NEXT: min.u16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp ult i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b ret i16 %sel } define i16 @ab_ule_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ab_ule_i16 -; CHECK: min.u16 +; CHECK-LABEL: ab_ule_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ab_ule_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ab_ule_i16_param_1]; +; CHECK-NEXT: min.u16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp ule i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b ret i16 %sel @@ -60,32 +115,72 @@ define i16 @ab_ule_i16(i16 %a, i16 %b) { ; *** ab, signed, i16 define i16 @ab_sgt_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ab_sgt_i16 -; CHECK: max.s16 +; CHECK-LABEL: ab_sgt_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ab_sgt_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ab_sgt_i16_param_1]; +; CHECK-NEXT: max.s16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp sgt i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b ret i16 %sel } define i16 @ab_sge_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ab_sge_i16 -; CHECK: max.s16 +; CHECK-LABEL: ab_sge_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ab_sge_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ab_sge_i16_param_1]; +; 
CHECK-NEXT: max.s16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp sge i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b ret i16 %sel } define i16 @ab_slt_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ab_slt_i16 -; CHECK: min.s16 +; CHECK-LABEL: ab_slt_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ab_slt_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ab_slt_i16_param_1]; +; CHECK-NEXT: min.s16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp slt i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b ret i16 %sel } define i16 @ab_sle_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ab_sle_i16 -; CHECK: min.s16 +; CHECK-LABEL: ab_sle_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ab_sle_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ab_sle_i16_param_1]; +; CHECK-NEXT: min.s16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp sle i16 %a, %b %sel = select i1 %cmp, i16 %a, i16 %b ret i16 %sel @@ -93,32 +188,72 @@ define i16 @ab_sle_i16(i16 %a, i16 %b) { ; *** ba, unsigned, i16 define i16 @ba_ugt_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ba_ugt_i16 -; CHECK: min.u16 +; CHECK-LABEL: ba_ugt_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ba_ugt_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ba_ugt_i16_param_1]; +; CHECK-NEXT: min.u16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp ugt i16 %a, %b %sel = select i1 %cmp, i16 %b, i16 %a ret i16 %sel } define i16 @ba_uge_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ba_uge_i16 -; CHECK: min.u16 +; CHECK-LABEL: ba_uge_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ba_uge_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ba_uge_i16_param_1]; +; CHECK-NEXT: min.u16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp uge i16 %a, %b %sel = select i1 %cmp, i16 %b, i16 %a ret i16 %sel } define i16 @ba_ult_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ba_ult_i16 -; CHECK: max.u16 +; CHECK-LABEL: ba_ult_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ba_ult_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ba_ult_i16_param_1]; +; CHECK-NEXT: max.u16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp ult i16 %a, %b %sel = select i1 %cmp, i16 %b, i16 %a ret i16 %sel } define i16 @ba_ule_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ba_ule_i16 -; CHECK: max.u16 +; CHECK-LABEL: ba_ule_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ba_ule_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ba_ule_i16_param_1]; +; CHECK-NEXT: max.u16 %rs3, %rs1, 
%rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp ule i16 %a, %b %sel = select i1 %cmp, i16 %b, i16 %a ret i16 %sel @@ -126,32 +261,72 @@ define i16 @ba_ule_i16(i16 %a, i16 %b) { ; *** ba, signed, i16 define i16 @ba_sgt_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ba_sgt_i16 -; CHECK: min.s16 +; CHECK-LABEL: ba_sgt_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ba_sgt_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ba_sgt_i16_param_1]; +; CHECK-NEXT: min.s16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp sgt i16 %a, %b %sel = select i1 %cmp, i16 %b, i16 %a ret i16 %sel } define i16 @ba_sge_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ba_sge_i16 -; CHECK: min.s16 +; CHECK-LABEL: ba_sge_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ba_sge_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ba_sge_i16_param_1]; +; CHECK-NEXT: min.s16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp sge i16 %a, %b %sel = select i1 %cmp, i16 %b, i16 %a ret i16 %sel } define i16 @ba_slt_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ba_slt_i16 -; CHECK: max.s16 +; CHECK-LABEL: ba_slt_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ba_slt_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ba_slt_i16_param_1]; +; CHECK-NEXT: max.s16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp slt i16 %a, %b %sel = select i1 %cmp, i16 %b, i16 %a ret i16 %sel } define i16 @ba_sle_i16(i16 %a, i16 %b) { -; CHECK-LABEL: @ba_sle_i16 -; CHECK: max.s16 +; CHECK-LABEL: ba_sle_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b16 %rs1, [ba_sle_i16_param_0]; +; CHECK-NEXT: ld.param.b16 %rs2, [ba_sle_i16_param_1]; +; CHECK-NEXT: max.s16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r1, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; %cmp = icmp sle i16 %a, %b %sel = select i1 %cmp, i16 %b, i16 %a ret i16 %sel @@ -162,32 +337,64 @@ define i16 @ba_sle_i16(i16 %a, i16 %b) { ; *** ab, unsigned, i32 define i32 @ab_ugt_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_ugt_i32 -; CHECK: max.u32 +; CHECK-LABEL: ab_ugt_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_ugt_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ab_ugt_i32_param_1]; +; CHECK-NEXT: max.u32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp ugt i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel } define i32 @ab_uge_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_uge_i32 -; CHECK: max.u32 +; CHECK-LABEL: ab_uge_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_uge_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ab_uge_i32_param_1]; +; CHECK-NEXT: max.u32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; 
CHECK-NEXT: ret; %cmp = icmp uge i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel } define i32 @ab_ult_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_ult_i32 -; CHECK: min.u32 +; CHECK-LABEL: ab_ult_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_ult_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ab_ult_i32_param_1]; +; CHECK-NEXT: min.u32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp ult i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel } define i32 @ab_ule_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_ule_i32 -; CHECK: min.u32 +; CHECK-LABEL: ab_ule_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_ule_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ab_ule_i32_param_1]; +; CHECK-NEXT: min.u32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp ule i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel @@ -195,32 +402,64 @@ define i32 @ab_ule_i32(i32 %a, i32 %b) { ; *** ab, signed, i32 define i32 @ab_sgt_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_sgt_i32 -; CHECK: max.s32 +; CHECK-LABEL: ab_sgt_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_sgt_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ab_sgt_i32_param_1]; +; CHECK-NEXT: max.s32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp sgt i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel } define i32 @ab_sge_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_sge_i32 -; CHECK: max.s32 +; CHECK-LABEL: ab_sge_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_sge_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ab_sge_i32_param_1]; +; CHECK-NEXT: max.s32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp sge i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel } define i32 @ab_slt_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_slt_i32 -; CHECK: min.s32 +; CHECK-LABEL: ab_slt_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_slt_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ab_slt_i32_param_1]; +; CHECK-NEXT: min.s32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp slt i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel } define i32 @ab_sle_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ab_sle_i32 -; CHECK: min.s32 +; CHECK-LABEL: ab_sle_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ab_sle_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ab_sle_i32_param_1]; +; CHECK-NEXT: min.s32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp sle i32 %a, %b %sel = select i1 %cmp, i32 %a, i32 %b ret i32 %sel @@ -228,32 +467,64 @@ define i32 @ab_sle_i32(i32 %a, i32 %b) { ; *** ba, unsigned, i32 define i32 @ba_ugt_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ba_ugt_i32 -; CHECK: min.u32 +; CHECK-LABEL: ba_ugt_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ba_ugt_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, 
[ba_ugt_i32_param_1]; +; CHECK-NEXT: min.u32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp ugt i32 %a, %b %sel = select i1 %cmp, i32 %b, i32 %a ret i32 %sel } define i32 @ba_uge_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ba_uge_i32 -; CHECK: min.u32 +; CHECK-LABEL: ba_uge_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ba_uge_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ba_uge_i32_param_1]; +; CHECK-NEXT: min.u32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp uge i32 %a, %b %sel = select i1 %cmp, i32 %b, i32 %a ret i32 %sel } define i32 @ba_ult_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ba_ult_i32 -; CHECK: max.u32 +; CHECK-LABEL: ba_ult_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ba_ult_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ba_ult_i32_param_1]; +; CHECK-NEXT: max.u32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp ult i32 %a, %b %sel = select i1 %cmp, i32 %b, i32 %a ret i32 %sel } define i32 @ba_ule_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ba_ule_i32 -; CHECK: max.u32 +; CHECK-LABEL: ba_ule_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ba_ule_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ba_ule_i32_param_1]; +; CHECK-NEXT: max.u32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp ule i32 %a, %b %sel = select i1 %cmp, i32 %b, i32 %a ret i32 %sel @@ -261,32 +532,64 @@ define i32 @ba_ule_i32(i32 %a, i32 %b) { ; *** ba, signed, i32 define i32 @ba_sgt_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ba_sgt_i32 -; CHECK: min.s32 +; CHECK-LABEL: ba_sgt_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ba_sgt_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ba_sgt_i32_param_1]; +; CHECK-NEXT: min.s32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp sgt i32 %a, %b %sel = select i1 %cmp, i32 %b, i32 %a ret i32 %sel } define i32 @ba_sge_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ba_sge_i32 -; CHECK: min.s32 +; CHECK-LABEL: ba_sge_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ba_sge_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ba_sge_i32_param_1]; +; CHECK-NEXT: min.s32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp sge i32 %a, %b %sel = select i1 %cmp, i32 %b, i32 %a ret i32 %sel } define i32 @ba_slt_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ba_slt_i32 -; CHECK: max.s32 +; CHECK-LABEL: ba_slt_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ba_slt_i32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [ba_slt_i32_param_1]; +; CHECK-NEXT: max.s32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp slt i32 %a, %b %sel = select i1 %cmp, i32 %b, i32 %a ret i32 %sel } define i32 @ba_sle_i32(i32 %a, i32 %b) { -; CHECK-LABEL: @ba_sle_i32 -; CHECK: max.s32 +; CHECK-LABEL: ba_sle_i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [ba_sle_i32_param_0]; +; CHECK-NEXT: 
ld.param.b32 %r2, [ba_sle_i32_param_1]; +; CHECK-NEXT: max.s32 %r3, %r1, %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; %cmp = icmp sle i32 %a, %b %sel = select i1 %cmp, i32 %b, i32 %a ret i32 %sel @@ -297,32 +600,64 @@ define i32 @ba_sle_i32(i32 %a, i32 %b) { ; *** ab, unsigned, i64 define i64 @ab_ugt_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_ugt_i64 -; CHECK: max.u64 +; CHECK-LABEL: ab_ugt_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ab_ugt_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ab_ugt_i64_param_1]; +; CHECK-NEXT: max.u64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp ugt i64 %a, %b %sel = select i1 %cmp, i64 %a, i64 %b ret i64 %sel } define i64 @ab_uge_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_uge_i64 -; CHECK: max.u64 +; CHECK-LABEL: ab_uge_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ab_uge_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ab_uge_i64_param_1]; +; CHECK-NEXT: max.u64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp uge i64 %a, %b %sel = select i1 %cmp, i64 %a, i64 %b ret i64 %sel } define i64 @ab_ult_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_ult_i64 -; CHECK: min.u64 +; CHECK-LABEL: ab_ult_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ab_ult_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ab_ult_i64_param_1]; +; CHECK-NEXT: min.u64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp ult i64 %a, %b %sel = select i1 %cmp, i64 %a, i64 %b ret i64 %sel } define i64 @ab_ule_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_ule_i64 -; CHECK: min.u64 +; CHECK-LABEL: ab_ule_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ab_ule_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ab_ule_i64_param_1]; +; CHECK-NEXT: min.u64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp ule i64 %a, %b %sel = select i1 %cmp, i64 %a, i64 %b ret i64 %sel @@ -330,32 +665,64 @@ define i64 @ab_ule_i64(i64 %a, i64 %b) { ; *** ab, signed, i64 define i64 @ab_sgt_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_sgt_i64 -; CHECK: max.s64 +; CHECK-LABEL: ab_sgt_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ab_sgt_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ab_sgt_i64_param_1]; +; CHECK-NEXT: max.s64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp sgt i64 %a, %b %sel = select i1 %cmp, i64 %a, i64 %b ret i64 %sel } define i64 @ab_sge_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_sge_i64 -; CHECK: max.s64 +; CHECK-LABEL: ab_sge_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ab_sge_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ab_sge_i64_param_1]; +; CHECK-NEXT: max.s64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp sge i64 %a, %b %sel = select i1 %cmp, i64 %a, i64 %b ret i64 %sel } define i64 @ab_slt_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_slt_i64 -; CHECK: min.s64 +; CHECK-LABEL: ab_slt_i64( +; CHECK: { +; 
CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ab_slt_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ab_slt_i64_param_1]; +; CHECK-NEXT: min.s64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp slt i64 %a, %b %sel = select i1 %cmp, i64 %a, i64 %b ret i64 %sel } define i64 @ab_sle_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ab_sle_i64 -; CHECK: min.s64 +; CHECK-LABEL: ab_sle_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ab_sle_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ab_sle_i64_param_1]; +; CHECK-NEXT: min.s64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp sle i64 %a, %b %sel = select i1 %cmp, i64 %a, i64 %b ret i64 %sel @@ -363,32 +730,64 @@ define i64 @ab_sle_i64(i64 %a, i64 %b) { ; *** ba, unsigned, i64 define i64 @ba_ugt_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ba_ugt_i64 -; CHECK: min.u64 +; CHECK-LABEL: ba_ugt_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ba_ugt_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ba_ugt_i64_param_1]; +; CHECK-NEXT: min.u64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp ugt i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a ret i64 %sel } define i64 @ba_uge_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ba_uge_i64 -; CHECK: min.u64 +; CHECK-LABEL: ba_uge_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ba_uge_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ba_uge_i64_param_1]; +; CHECK-NEXT: min.u64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp uge i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a ret i64 %sel } define i64 @ba_ult_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ba_ult_i64 -; CHECK: max.u64 +; CHECK-LABEL: ba_ult_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ba_ult_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ba_ult_i64_param_1]; +; CHECK-NEXT: max.u64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp ult i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a ret i64 %sel } define i64 @ba_ule_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ba_ule_i64 -; CHECK: max.u64 +; CHECK-LABEL: ba_ule_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ba_ule_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ba_ule_i64_param_1]; +; CHECK-NEXT: max.u64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp ule i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a ret i64 %sel @@ -396,33 +795,239 @@ define i64 @ba_ule_i64(i64 %a, i64 %b) { ; *** ba, signed, i64 define i64 @ba_sgt_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ba_sgt_i64 -; CHECK: min.s64 +; CHECK-LABEL: ba_sgt_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ba_sgt_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ba_sgt_i64_param_1]; +; CHECK-NEXT: min.s64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp sgt i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a 
ret i64 %sel } define i64 @ba_sge_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ba_sge_i64 -; CHECK: min.s64 +; CHECK-LABEL: ba_sge_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ba_sge_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ba_sge_i64_param_1]; +; CHECK-NEXT: min.s64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp sge i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a ret i64 %sel } define i64 @ba_slt_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ba_slt_i64 -; CHECK: max.s64 +; CHECK-LABEL: ba_slt_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ba_slt_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ba_slt_i64_param_1]; +; CHECK-NEXT: max.s64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp slt i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a ret i64 %sel } define i64 @ba_sle_i64(i64 %a, i64 %b) { -; CHECK-LABEL: @ba_sle_i64 -; CHECK: max.s64 +; CHECK-LABEL: ba_sle_i64( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [ba_sle_i64_param_0]; +; CHECK-NEXT: ld.param.b64 %rd2, [ba_sle_i64_param_1]; +; CHECK-NEXT: max.s64 %rd3, %rd1, %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-NEXT: ret; %cmp = icmp sle i64 %a, %b %sel = select i1 %cmp, i64 %b, i64 %a ret i64 %sel } + +define i32 @min_relu_s32(i32 %a, i32 %b) { +; SM90-LABEL: min_relu_s32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<4>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b32 %r1, [min_relu_s32_param_0]; +; SM90-NEXT: ld.param.b32 %r2, [min_relu_s32_param_1]; +; SM90-NEXT: min.relu.s32 %r3, %r1, %r2; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; +; SM90-NEXT: ret; +; +; SM20-LABEL: min_relu_s32( +; SM20: { +; SM20-NEXT: .reg .b32 %r<5>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.b32 %r1, [min_relu_s32_param_0]; +; SM20-NEXT: ld.param.b32 %r2, [min_relu_s32_param_1]; +; SM20-NEXT: min.s32 %r3, %r1, %r2; +; SM20-NEXT: max.s32 %r4, %r3, 0; +; SM20-NEXT: st.param.b32 [func_retval0], %r4; +; SM20-NEXT: ret; + %min = call i32 @llvm.smin.s32(i32 %a, i32 %b) + %max = call i32 @llvm.smax.s32(i32 %min, i32 0) + ret i32 %max +} + +define i32 @max_relu_s32(i32 %a, i32 %b) { +; SM90-LABEL: max_relu_s32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<4>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b32 %r1, [max_relu_s32_param_0]; +; SM90-NEXT: ld.param.b32 %r2, [max_relu_s32_param_1]; +; SM90-NEXT: max.relu.s32 %r3, %r1, %r2; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; +; SM90-NEXT: ret; +; +; SM20-LABEL: max_relu_s32( +; SM20: { +; SM20-NEXT: .reg .b32 %r<5>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.b32 %r1, [max_relu_s32_param_0]; +; SM20-NEXT: ld.param.b32 %r2, [max_relu_s32_param_1]; +; SM20-NEXT: max.s32 %r3, %r1, %r2; +; SM20-NEXT: max.s32 %r4, %r3, 0; +; SM20-NEXT: st.param.b32 [func_retval0], %r4; +; SM20-NEXT: ret; + %max1 = call i32 @llvm.smax.s32(i32 %a, i32 %b) + %max2 = call i32 @llvm.smax.s32(i32 %max1, i32 0) + ret i32 %max2 +} + +define i32 @max_relu_s32_v2(i32 %a, i32 %b) { +; SM90-LABEL: max_relu_s32_v2( +; SM90: { +; SM90-NEXT: .reg .b32 %r<4>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b32 %r1, [max_relu_s32_v2_param_0]; +; SM90-NEXT: ld.param.b32 %r2, [max_relu_s32_v2_param_1]; +; SM90-NEXT: max.relu.s32 
%r3, %r1, %r2; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; +; SM90-NEXT: ret; +; +; SM20-LABEL: max_relu_s32_v2( +; SM20: { +; SM20-NEXT: .reg .b32 %r<5>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.b32 %r1, [max_relu_s32_v2_param_0]; +; SM20-NEXT: ld.param.b32 %r2, [max_relu_s32_v2_param_1]; +; SM20-NEXT: max.s32 %r3, %r1, %r2; +; SM20-NEXT: max.s32 %r4, %r3, 0; +; SM20-NEXT: st.param.b32 [func_retval0], %r4; +; SM20-NEXT: ret; + %max2 = call i32 @llvm.smax.s32(i32 %a, i32 0) + %max1 = call i32 @llvm.smax.s32(i32 %max2, i32 %b) + ret i32 %max1 +} + +define <2 x i16> @min_relu_s16x2(<2 x i16> %a, <2 x i16> %b) { +; SM90-LABEL: min_relu_s16x2( +; SM90: { +; SM90-NEXT: .reg .b32 %r<4>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b32 %r1, [min_relu_s16x2_param_0]; +; SM90-NEXT: ld.param.b32 %r2, [min_relu_s16x2_param_1]; +; SM90-NEXT: min.relu.s16x2 %r3, %r1, %r2; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; +; SM90-NEXT: ret; +; +; SM20-LABEL: min_relu_s16x2( +; SM20: { +; SM20-NEXT: .reg .b16 %rs<9>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [min_relu_s16x2_param_0]; +; SM20-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [min_relu_s16x2_param_1]; +; SM20-NEXT: min.s16 %rs5, %rs1, %rs3; +; SM20-NEXT: min.s16 %rs6, %rs2, %rs4; +; SM20-NEXT: max.s16 %rs7, %rs6, 0; +; SM20-NEXT: max.s16 %rs8, %rs5, 0; +; SM20-NEXT: st.param.v2.b16 [func_retval0], {%rs8, %rs7}; +; SM20-NEXT: ret; + %min = call <2 x i16> @llvm.smin.v2i16(<2 x i16> %a, <2 x i16> %b) + %max = call <2 x i16> @llvm.smax.v2i16(<2 x i16> %min, <2 x i16> zeroinitializer) + ret <2 x i16> %max +} + +define <2 x i16> @max_relu_s16x2(<2 x i16> %a, <2 x i16> %b) { +; SM90-LABEL: max_relu_s16x2( +; SM90: { +; SM90-NEXT: .reg .b32 %r<4>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b32 %r1, [max_relu_s16x2_param_0]; +; SM90-NEXT: ld.param.b32 %r2, [max_relu_s16x2_param_1]; +; SM90-NEXT: max.relu.s16x2 %r3, %r1, %r2; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; +; SM90-NEXT: ret; +; +; SM20-LABEL: max_relu_s16x2( +; SM20: { +; SM20-NEXT: .reg .b16 %rs<9>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [max_relu_s16x2_param_0]; +; SM20-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [max_relu_s16x2_param_1]; +; SM20-NEXT: max.s16 %rs5, %rs1, %rs3; +; SM20-NEXT: max.s16 %rs6, %rs2, %rs4; +; SM20-NEXT: max.s16 %rs7, %rs6, 0; +; SM20-NEXT: max.s16 %rs8, %rs5, 0; +; SM20-NEXT: st.param.v2.b16 [func_retval0], {%rs8, %rs7}; +; SM20-NEXT: ret; + %max1 = call <2 x i16> @llvm.smax.v2i16(<2 x i16> %a, <2 x i16> %b) + %max2 = call <2 x i16> @llvm.smax.v2i16(<2 x i16> %max1, <2 x i16> zeroinitializer) + ret <2 x i16> %max2 +} + +define <2 x i16> @max_relu_s16x2_v2(<2 x i16> %a, <2 x i16> %b) { +; SM90-LABEL: max_relu_s16x2_v2( +; SM90: { +; SM90-NEXT: .reg .b32 %r<4>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b32 %r1, [max_relu_s16x2_v2_param_0]; +; SM90-NEXT: ld.param.b32 %r2, [max_relu_s16x2_v2_param_1]; +; SM90-NEXT: max.relu.s16x2 %r3, %r1, %r2; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; +; SM90-NEXT: ret; +; +; SM20-LABEL: max_relu_s16x2_v2( +; SM20: { +; SM20-NEXT: .reg .b16 %rs<9>; +; SM20-EMPTY: +; SM20-NEXT: // %bb.0: +; SM20-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [max_relu_s16x2_v2_param_0]; +; SM20-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [max_relu_s16x2_v2_param_1]; +; SM20-NEXT: max.s16 %rs5, %rs1, %rs3; +; SM20-NEXT: max.s16 %rs6, %rs2, %rs4; +; SM20-NEXT: max.s16 %rs7, %rs6, 0; +; SM20-NEXT: max.s16 %rs8, %rs5, 
0; +; SM20-NEXT: st.param.v2.b16 [func_retval0], {%rs8, %rs7}; +; SM20-NEXT: ret; + %max2 = call <2 x i16> @llvm.smax.v2i16(<2 x i16> %a, <2 x i16> zeroinitializer) + %max1 = call <2 x i16> @llvm.smax.v2i16(<2 x i16> %max2, <2 x i16> %b) + ret <2 x i16> %max1 +} diff --git a/llvm/test/CodeGen/NVPTX/i1-select.ll b/llvm/test/CodeGen/NVPTX/i1-select.ll index 9a051b3..df32e2a 100644 --- a/llvm/test/CodeGen/NVPTX/i1-select.ll +++ b/llvm/test/CodeGen/NVPTX/i1-select.ll @@ -108,9 +108,9 @@ define i32 @test_select_i1_basic_folding(i32 %v1, i32 %v2, i32 %v3, i32 %true, i ; CHECK-NEXT: ld.param.b32 %r4, [test_select_i1_basic_folding_param_3]; ; CHECK-NEXT: xor.pred %p6, %p1, %p3; ; CHECK-NEXT: ld.param.b32 %r5, [test_select_i1_basic_folding_param_4]; -; CHECK-NEXT: and.pred %p7, %p6, %p4; +; CHECK-NEXT: and.pred %p8, %p6, %p4; ; CHECK-NEXT: and.pred %p9, %p2, %p4; -; CHECK-NEXT: and.pred %p10, %p3, %p7; +; CHECK-NEXT: and.pred %p10, %p3, %p8; ; CHECK-NEXT: or.pred %p11, %p10, %p9; ; CHECK-NEXT: xor.pred %p12, %p11, %p3; ; CHECK-NEXT: selp.b32 %r6, %r4, %r5, %p12; diff --git a/llvm/test/CodeGen/NVPTX/sext-setcc.ll b/llvm/test/CodeGen/NVPTX/sext-setcc.ll index 9a67bdf..97918a6 100644 --- a/llvm/test/CodeGen/NVPTX/sext-setcc.ll +++ b/llvm/test/CodeGen/NVPTX/sext-setcc.ll @@ -29,7 +29,6 @@ define <4 x i8> @sext_setcc_v4i1_to_v4i8(ptr %p) { ; CHECK-LABEL: sext_setcc_v4i1_to_v4i8( ; CHECK: { ; CHECK-NEXT: .reg .pred %p<5>; -; CHECK-NEXT: .reg .b16 %rs<5>; ; CHECK-NEXT: .reg .b32 %r<13>; ; CHECK-NEXT: .reg .b64 %rd<2>; ; CHECK-EMPTY: @@ -37,17 +36,13 @@ define <4 x i8> @sext_setcc_v4i1_to_v4i8(ptr %p) { ; CHECK-NEXT: ld.param.b64 %rd1, [sext_setcc_v4i1_to_v4i8_param_0]; ; CHECK-NEXT: ld.b32 %r1, [%rd1]; ; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0x7770U; -; CHECK-NEXT: cvt.u16.u32 %rs1, %r2; -; CHECK-NEXT: setp.eq.b16 %p1, %rs1, 0; +; CHECK-NEXT: setp.eq.b32 %p1, %r2, 0; ; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x7771U; -; CHECK-NEXT: cvt.u16.u32 %rs2, %r3; -; CHECK-NEXT: setp.eq.b16 %p2, %rs2, 0; +; CHECK-NEXT: setp.eq.b32 %p2, %r3, 0; ; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0x7772U; -; CHECK-NEXT: cvt.u16.u32 %rs3, %r4; -; CHECK-NEXT: setp.eq.b16 %p3, %rs3, 0; +; CHECK-NEXT: setp.eq.b32 %p3, %r4, 0; ; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0x7773U; -; CHECK-NEXT: cvt.u16.u32 %rs4, %r5; -; CHECK-NEXT: setp.eq.b16 %p4, %rs4, 0; +; CHECK-NEXT: setp.eq.b32 %p4, %r5, 0; ; CHECK-NEXT: selp.b32 %r6, -1, 0, %p4; ; CHECK-NEXT: selp.b32 %r7, -1, 0, %p3; ; CHECK-NEXT: prmt.b32 %r8, %r7, %r6, 0x3340U; diff --git a/llvm/test/CodeGen/NVPTX/trunc-setcc.ll b/llvm/test/CodeGen/NVPTX/trunc-setcc.ll new file mode 100644 index 0000000..f22e37e --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/trunc-setcc.ll @@ -0,0 +1,269 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_50 | FileCheck %s +; RUN: %if ptxas %{ llc < %s -mcpu=sm_50 | %ptxas-verify -arch=sm_50 %} + +target triple = "nvptx64-nvidia-cuda" + +define i1 @trunc_nsw_singed_const(i32 %a) { +; CHECK-LABEL: trunc_nsw_singed_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_singed_const_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: setp.gt.s32 %p1, %r2, -1; +; CHECK-NEXT: selp.b32 %r3, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nsw i32 %a2 to i8 + %c = icmp sgt i8 %b, -1 + ret i1 %c +} + +define i1 
@trunc_nuw_singed_const(i32 %a) { +; CHECK-LABEL: trunc_nuw_singed_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nuw_singed_const_param_0]; +; CHECK-NEXT: add.s16 %rs2, %rs1, 1; +; CHECK-NEXT: cvt.s16.s8 %rs3, %rs2; +; CHECK-NEXT: setp.lt.s16 %p1, %rs3, 100; +; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nuw i32 %a2 to i8 + %c = icmp slt i8 %b, 100 + ret i1 %c +} + +define i1 @trunc_nsw_unsinged_const(i32 %a) { +; CHECK-LABEL: trunc_nsw_unsinged_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nsw_unsinged_const_param_0]; +; CHECK-NEXT: add.s16 %rs2, %rs1, 1; +; CHECK-NEXT: and.b16 %rs3, %rs2, 255; +; CHECK-NEXT: setp.lt.u16 %p1, %rs3, 236; +; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nsw i32 %a2 to i8 + %c = icmp ult i8 %b, -20 + ret i1 %c +} + +define i1 @trunc_nuw_unsinged_const(i32 %a) { +; CHECK-LABEL: trunc_nuw_unsinged_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_unsinged_const_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: setp.gt.u32 %p1, %r2, 100; +; CHECK-NEXT: selp.b32 %r3, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nuw i32 %a2 to i8 + %c = icmp ugt i8 %b, 100 + ret i1 %c +} + + +define i1 @trunc_nsw_eq_const(i32 %a) { +; CHECK-LABEL: trunc_nsw_eq_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_eq_const_param_0]; +; CHECK-NEXT: setp.eq.b32 %p1, %r1, 99; +; CHECK-NEXT: selp.b32 %r2, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nsw i32 %a2 to i8 + %c = icmp eq i8 %b, 100 + ret i1 %c +} + +define i1 @trunc_nuw_eq_const(i32 %a) { +; CHECK-LABEL: trunc_nuw_eq_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_eq_const_param_0]; +; CHECK-NEXT: setp.eq.b32 %p1, %r1, 99; +; CHECK-NEXT: selp.b32 %r2, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nuw i32 %a2 to i8 + %c = icmp eq i8 %b, 100 + ret i1 %c +} + +;;; + +define i1 @trunc_nsw_singed(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nsw_singed( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_singed_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: ld.param.b32 %r3, [trunc_nsw_singed_param_1]; +; CHECK-NEXT: add.s32 %r4, %r3, 7; +; CHECK-NEXT: setp.gt.s32 %p1, %r2, %r4; +; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 7 + %c1 = trunc nsw i32 %b1 to i8 + %c2 = trunc nsw i32 %b2 to i8 + %c = icmp sgt i8 %c1, %c2 + ret i1 %c +} + +define i1 
@trunc_nuw_singed(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nuw_singed( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<7>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nuw_singed_param_0]; +; CHECK-NEXT: ld.param.b8 %rs2, [trunc_nuw_singed_param_1]; +; CHECK-NEXT: add.s16 %rs3, %rs1, 1; +; CHECK-NEXT: cvt.s16.s8 %rs4, %rs3; +; CHECK-NEXT: add.s16 %rs5, %rs2, 6; +; CHECK-NEXT: cvt.s16.s8 %rs6, %rs5; +; CHECK-NEXT: setp.lt.s16 %p1, %rs4, %rs6; +; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 6 + %c1 = trunc nuw i32 %b1 to i8 + %c2 = trunc nuw i32 %b2 to i8 + %c = icmp slt i8 %c1, %c2 + ret i1 %c +} + +define i1 @trunc_nsw_unsinged(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nsw_unsinged( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<7>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nsw_unsinged_param_0]; +; CHECK-NEXT: ld.param.b8 %rs2, [trunc_nsw_unsinged_param_1]; +; CHECK-NEXT: add.s16 %rs3, %rs1, 1; +; CHECK-NEXT: and.b16 %rs4, %rs3, 255; +; CHECK-NEXT: add.s16 %rs5, %rs2, 4; +; CHECK-NEXT: and.b16 %rs6, %rs5, 255; +; CHECK-NEXT: setp.lt.u16 %p1, %rs4, %rs6; +; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 4 + %c1 = trunc nsw i32 %b1 to i8 + %c2 = trunc nsw i32 %b2 to i8 + %c = icmp ult i8 %c1, %c2 + ret i1 %c +} + +define i1 @trunc_nuw_unsinged(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nuw_unsinged( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_unsinged_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: ld.param.b32 %r3, [trunc_nuw_unsinged_param_1]; +; CHECK-NEXT: add.s32 %r4, %r3, 5; +; CHECK-NEXT: setp.gt.u32 %p1, %r2, %r4; +; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 5 + %c1 = trunc nuw i32 %b1 to i8 + %c2 = trunc nuw i32 %b2 to i8 + %c = icmp ugt i8 %c1, %c2 + ret i1 %c +} + + +define i1 @trunc_nsw_eq(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nsw_eq( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_eq_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: ld.param.b32 %r3, [trunc_nsw_eq_param_1]; +; CHECK-NEXT: add.s32 %r4, %r3, 3; +; CHECK-NEXT: setp.eq.b32 %p1, %r2, %r4; +; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 3 + %c1 = trunc nsw i32 %b1 to i8 + %c2 = trunc nsw i32 %b2 to i8 + %c = icmp eq i8 %c1, %c2 + ret i1 %c +} + +define i1 @trunc_nuw_eq(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nuw_eq( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_eq_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 2; +; CHECK-NEXT: ld.param.b32 %r3, [trunc_nuw_eq_param_1]; +; CHECK-NEXT: add.s32 %r4, %r3, 1; +; CHECK-NEXT: setp.eq.b32 %p1, %r2, %r4; +; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; 
CHECK-NEXT: ret; + %b1 = add i32 %a1, 2 + %b2 = add i32 %a2, 1 + %c1 = trunc nuw i32 %b1 to i8 + %c2 = trunc nuw i32 %b2 to i8 + %c = icmp eq i8 %c1, %c2 + ret i1 %c +} diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll index 9ffb4fd..258ddf6 100644 --- a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll +++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll @@ -37,9 +37,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8 ; 32BIT: bb.0.entry: ; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6 ; 32BIT-NEXT: {{ $}} - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6 ; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3 ; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3 ; @@ -47,9 +47,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8 ; 64BIT: bb.0.entry: ; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6 ; 64BIT-NEXT: {{ $}} - ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 ; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3 ; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3 entry: @@ -96,9 +96,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3 ; 32BIT: bb.0.entry: ; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6 ; 32BIT-NEXT: {{ $}} - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6 ; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3 ; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3 ; @@ -106,9 +106,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3 ; 64BIT: bb.0.entry: ; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6 ; 64BIT-NEXT: {{ $}} - ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 + ; 
64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 ; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3 ; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3 entry: diff --git a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll index 1863eaf..bfc7fbb 100644 --- a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll +++ b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll @@ -1,5 +1,5 @@ -; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s -; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s +; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s +; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s define ptr @nest_receiver(ptr nest %arg) nounwind { ret ptr %arg @@ -9,5 +9,10 @@ define ptr @nest_caller(ptr %arg) nounwind { %result = call ptr @nest_receiver(ptr nest %arg) ret ptr %result } +; CHECK-LABEL: .nest_receiver: +; CHECK: mr 3, 11 +; CHECK: blr -; CHECK: LLVM ERROR: Nest arguments are unimplemented. +; CHECK-LABEL: .nest_caller: +; CHECK: mr 11, 3 +; CHECK: bl .nest_receiver diff --git a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll index b71f6b5..19df220 100644 --- a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll +++ b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll @@ -1,7 +1,7 @@ -; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s -; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s - -; CHECK: LLVM ERROR: INIT_TRAMPOLINE operation is not supported on AIX. +; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | \ +; RUN: FileCheck %s --check-prefix=32BIT +; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 -mattr=-altivec | \ +; RUN: FileCheck %s --check-prefix=64BIT define void @create_trampoline(ptr %buffer, ptr %nval) nounwind { entry: @@ -12,3 +12,17 @@ entry: declare i32 @nested(i32); declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind + +; 32BIT: stw 4, 8(3) +; 32BIT: lwz [[FuncDesc:[0-9]+]], L..C0(2) +; 32BIT-DAG: lwz [[SCRATCH1:[0-9]+]], 0([[FuncDesc]]) +; 32BIT-DAG: lwz [[SCRATCH2:[0-9]+]], 4([[FuncDesc]]) +; 32BIT-DAG: stw [[SCRATCH1]], 0(3) +; 32BIT-DAG: stw [[SCRATCH2]], 4(3) + +; 64BIT: std 4, 16(3) +; 64BIT-DAG: ld [[FuncDesc:[0-9]+]], L..C0(2) +; 64BIT-DAG: ld [[SCRATCH1:[0-9]+]], 0([[FuncDesc]]) +; 64BIT-DAG: ld [[SCRATCH2:[0-9]+]], 8([[FuncDesc]]) +; 64BIT-DAG: std [[SCRATCH1]], 0(3) +; 64BIT-DAG: std [[SCRATCH2]], 8(3) diff --git a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll index aae2326..afc7a39 100644 --- a/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll +++ b/llvm/test/CodeGen/PowerPC/aix-vec_insert_elt.ll @@ -750,25 +750,21 @@ entry: define <2 x double> @testDoubleImm1(<2 x double> %a, double %b) { ; CHECK-64-LABEL: testDoubleImm1: ; CHECK-64: # %bb.0: # %entry -; CHECK-64-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-64-NEXT: xxpermdi 34, 1, 34, 1 ; CHECK-64-NEXT: blr ; ; CHECK-32-LABEL: testDoubleImm1: ; CHECK-32: # %bb.0: # %entry -; CHECK-32-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-32-NEXT: xxpermdi 34, 1, 34, 1 ; CHECK-32-NEXT: blr ; ; CHECK-64-P10-LABEL: testDoubleImm1: ; CHECK-64-P10: # %bb.0: # %entry -; CHECK-64-P10-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-64-P10-NEXT: xxpermdi 34, 1, 34, 1 ; 
CHECK-64-P10-NEXT: blr ; ; CHECK-32-P10-LABEL: testDoubleImm1: ; CHECK-32-P10: # %bb.0: # %entry -; CHECK-32-P10-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-32-P10-NEXT: xxpermdi 34, 1, 34, 1 ; CHECK-32-P10-NEXT: blr entry: diff --git a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll index 10fc308..9dd0fbe 100644 --- a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll +++ b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll @@ -1757,11 +1757,7 @@ entry: define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) { ; P9BE-LABEL: fromRegsConvdtoi: ; P9BE: # %bb.0: # %entry -; P9BE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2 ; P9BE-NEXT: xxmrghd vs0, vs2, vs4 -; P9BE-NEXT: # kill: def $f3 killed $f3 def $vsl3 -; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9BE-NEXT: xvcvdpsxws v2, vs0 ; P9BE-NEXT: xxmrghd vs0, vs1, vs3 ; P9BE-NEXT: xvcvdpsxws v3, vs0 @@ -1770,11 +1766,7 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) { ; ; P9LE-LABEL: fromRegsConvdtoi: ; P9LE: # %bb.0: # %entry -; P9LE-NEXT: # kill: def $f3 killed $f3 def $vsl3 -; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9LE-NEXT: xxmrghd vs0, vs3, vs1 -; P9LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 ; P9LE-NEXT: xvcvdpsxws v2, vs0 ; P9LE-NEXT: xxmrghd vs0, vs4, vs2 ; P9LE-NEXT: xvcvdpsxws v3, vs0 @@ -1783,10 +1775,6 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) { ; ; P8BE-LABEL: fromRegsConvdtoi: ; P8BE: # %bb.0: # %entry -; P8BE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; P8BE-NEXT: # kill: def $f3 killed $f3 def $vsl3 -; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8BE-NEXT: xxmrghd vs0, vs2, vs4 ; P8BE-NEXT: xxmrghd vs1, vs1, vs3 ; P8BE-NEXT: xvcvdpsxws v2, vs0 @@ -1796,10 +1784,6 @@ define <4 x i32> @fromRegsConvdtoi(double %a, double %b, double %c, double %d) { ; ; P8LE-LABEL: fromRegsConvdtoi: ; P8LE: # %bb.0: # %entry -; P8LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; P8LE-NEXT: # kill: def $f3 killed $f3 def $vsl3 -; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8LE-NEXT: xxmrghd vs0, vs3, vs1 ; P8LE-NEXT: xxmrghd vs1, vs4, vs2 ; P8LE-NEXT: xvcvdpsxws v2, vs0 @@ -3262,11 +3246,7 @@ entry: define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d) { ; P9BE-LABEL: fromRegsConvdtoui: ; P9BE: # %bb.0: # %entry -; P9BE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2 ; P9BE-NEXT: xxmrghd vs0, vs2, vs4 -; P9BE-NEXT: # kill: def $f3 killed $f3 def $vsl3 -; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9BE-NEXT: xvcvdpuxws v2, vs0 ; P9BE-NEXT: xxmrghd vs0, vs1, vs3 ; P9BE-NEXT: xvcvdpuxws v3, vs0 @@ -3275,11 +3255,7 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d) ; ; P9LE-LABEL: fromRegsConvdtoui: ; P9LE: # %bb.0: # %entry -; P9LE-NEXT: # kill: def $f3 killed $f3 def $vsl3 -; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9LE-NEXT: xxmrghd vs0, vs3, vs1 -; P9LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 ; P9LE-NEXT: xvcvdpuxws v2, vs0 ; P9LE-NEXT: xxmrghd vs0, vs4, vs2 ; P9LE-NEXT: xvcvdpuxws v3, vs0 @@ -3288,10 +3264,6 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d) ; ; 
P8BE-LABEL: fromRegsConvdtoui: ; P8BE: # %bb.0: # %entry -; P8BE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; P8BE-NEXT: # kill: def $f3 killed $f3 def $vsl3 -; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8BE-NEXT: xxmrghd vs0, vs2, vs4 ; P8BE-NEXT: xxmrghd vs1, vs1, vs3 ; P8BE-NEXT: xvcvdpuxws v2, vs0 @@ -3301,10 +3273,6 @@ define <4 x i32> @fromRegsConvdtoui(double %a, double %b, double %c, double %d) ; ; P8LE-LABEL: fromRegsConvdtoui: ; P8LE: # %bb.0: # %entry -; P8LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; P8LE-NEXT: # kill: def $f3 killed $f3 def $vsl3 -; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8LE-NEXT: xxmrghd vs0, vs3, vs1 ; P8LE-NEXT: xxmrghd vs1, vs4, vs2 ; P8LE-NEXT: xvcvdpuxws v2, vs0 @@ -4578,32 +4546,24 @@ entry: define <2 x i64> @fromRegsConvdtoll(double %a, double %b) { ; P9BE-LABEL: fromRegsConvdtoll: ; P9BE: # %bb.0: # %entry -; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9BE-NEXT: xxmrghd vs0, vs1, vs2 ; P9BE-NEXT: xvcvdpsxds v2, vs0 ; P9BE-NEXT: blr ; ; P9LE-LABEL: fromRegsConvdtoll: ; P9LE: # %bb.0: # %entry -; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9LE-NEXT: xxmrghd vs0, vs2, vs1 ; P9LE-NEXT: xvcvdpsxds v2, vs0 ; P9LE-NEXT: blr ; ; P8BE-LABEL: fromRegsConvdtoll: ; P8BE: # %bb.0: # %entry -; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8BE-NEXT: xxmrghd vs0, vs1, vs2 ; P8BE-NEXT: xvcvdpsxds v2, vs0 ; P8BE-NEXT: blr ; ; P8LE-LABEL: fromRegsConvdtoll: ; P8LE: # %bb.0: # %entry -; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8LE-NEXT: xxmrghd vs0, vs2, vs1 ; P8LE-NEXT: xvcvdpsxds v2, vs0 ; P8LE-NEXT: blr @@ -5740,32 +5700,24 @@ entry: define <2 x i64> @fromRegsConvdtoull(double %a, double %b) { ; P9BE-LABEL: fromRegsConvdtoull: ; P9BE: # %bb.0: # %entry -; P9BE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P9BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9BE-NEXT: xxmrghd vs0, vs1, vs2 ; P9BE-NEXT: xvcvdpuxds v2, vs0 ; P9BE-NEXT: blr ; ; P9LE-LABEL: fromRegsConvdtoull: ; P9LE: # %bb.0: # %entry -; P9LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P9LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9LE-NEXT: xxmrghd vs0, vs2, vs1 ; P9LE-NEXT: xvcvdpuxds v2, vs0 ; P9LE-NEXT: blr ; ; P8BE-LABEL: fromRegsConvdtoull: ; P8BE: # %bb.0: # %entry -; P8BE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P8BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8BE-NEXT: xxmrghd vs0, vs1, vs2 ; P8BE-NEXT: xvcvdpuxds v2, vs0 ; P8BE-NEXT: blr ; ; P8LE-LABEL: fromRegsConvdtoull: ; P8LE: # %bb.0: # %entry -; P8LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; P8LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8LE-NEXT: xxmrghd vs0, vs2, vs1 ; P8LE-NEXT: xvcvdpuxds v2, vs0 ; P8LE-NEXT: blr diff --git a/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll b/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll index b40fbc3..7f6fdc7 100644 --- a/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll +++ b/llvm/test/CodeGen/PowerPC/canonical-merge-shuffles.ll @@ -562,7 +562,6 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea ; CHECK-P8-NEXT: bl dummy ; CHECK-P8-NEXT: nop ; CHECK-P8-NEXT: xxlxor f0, f0, f0 -; CHECK-P8-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-P8-NEXT: xxmrghd vs0, vs1, vs0 ; 
CHECK-P8-NEXT: xxswapd vs0, vs0 ; CHECK-P8-NEXT: stxvd2x vs0, 0, r30 @@ -577,7 +576,6 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea ; CHECK-P9-NEXT: bl dummy ; CHECK-P9-NEXT: nop ; CHECK-P9-NEXT: xxlxor f0, f0, f0 -; CHECK-P9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-P9-NEXT: xxmrghd vs0, vs1, vs0 ; CHECK-P9-NEXT: stxv vs0, 0(r30) ; @@ -591,7 +589,6 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea ; CHECK-P9-BE-NEXT: bl dummy ; CHECK-P9-BE-NEXT: nop ; CHECK-P9-BE-NEXT: xxlxor f0, f0, f0 -; CHECK-P9-BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-P9-BE-NEXT: xxmrghd vs0, vs0, vs1 ; CHECK-P9-BE-NEXT: stxv vs0, 0(r30) ; @@ -618,7 +615,6 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea ; CHECK-P7-NEXT: bl dummy ; CHECK-P7-NEXT: nop ; CHECK-P7-NEXT: xxlxor f0, f0, f0 -; CHECK-P7-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-P7-NEXT: xxmrghd vs0, vs1, vs0 ; CHECK-P7-NEXT: xxswapd vs0, vs0 ; CHECK-P7-NEXT: stxvd2x vs0, 0, r30 @@ -633,7 +629,6 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea ; P8-AIX-64-NEXT: bl .dummy[PR] ; P8-AIX-64-NEXT: nop ; P8-AIX-64-NEXT: xxlxor f0, f0, f0 -; P8-AIX-64-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8-AIX-64-NEXT: xxmrghd vs0, vs0, vs1 ; P8-AIX-64-NEXT: stxvd2x vs0, 0, r31 ; @@ -647,7 +642,6 @@ define dso_local void @no_crash_elt0_from_RHS(ptr noalias nocapture dereferencea ; P8-AIX-32-NEXT: bl .dummy[PR] ; P8-AIX-32-NEXT: nop ; P8-AIX-32-NEXT: xxlxor f0, f0, f0 -; P8-AIX-32-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8-AIX-32-NEXT: xxmrghd vs0, vs0, vs1 ; P8-AIX-32-NEXT: stxvd2x vs0, 0, r31 test_entry: diff --git a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll index 59173e2..d8e66d6 100644 --- a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll +++ b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \ ; RUN: < %s | FileCheck %s --check-prefix=POWERPC_64LE @@ -7,240 +8,90 @@ ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc-ibm-aix \ ; RUN: < %s | FileCheck %s --check-prefix=POWERPC_32 -define i32 @test_Greater_than(ptr %colauths, i32 signext %ncols) { -; This testcase is manually reduced to isolate the critical code blocks. -; It is designed to check for vector comparison specifically for zero vectors. -; In the vector.body section, we are expecting a comparison instruction (vcmpequh), -; merge instructions (vmrghh and vmrglh) which use exactly 2 vectors. -; The output of the merge instruction is being used by xxland and finally -; accumulated by vadduwm instruction. - +define i32 @test_Greater_than(ptr %colauths) { +; This testcase is for the special case of zero-vector comparisons. +; Currently the generated code does a comparison (vcmpequh) and then a negation (xxlnor). +; This pattern is expected to be optimized in a future patch. 
; POWERPC_64LE-LABEL: test_Greater_than: -; POWERPC_64LE: .LBB0_6: # %vector.body -; POWERPC_64LE-NEXT: # -; POWERPC_64LE-NEXT: lxv [[R1:[0-9]+]], -64(4) -; POWERPC_64LE-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_64LE-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_64LE-NEXT: vmrghh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_64LE-NEXT: vmrglh [[R2]], [[R2]], [[R2]] -; POWERPC_64LE-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_64LE-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_64LE-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_64LE: .LBB0_10: # %vec.epilog.vector.body -; POWERPC_64LE-NEXT: # -; POWERPC_64LE-NEXT: lxv [[R8:[0-9]+]], 0(4) -; POWERPC_64LE-NEXT: addi 4, 4, 16 -; POWERPC_64LE-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]] -; POWERPC_64LE-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_64LE-NEXT: vmrglh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_64LE-NEXT: vmrghh [[R9]], [[R9]], [[R9]] -; POWERPC_64LE-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_64LE-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_64LE-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_64LE-NEXT: vadduwm [[R3]], [[R3]], [[R11]] -; POWERPC_64LE-NEXT: bdnz .LBB0_10 -; POWERPC_64LE: blr +; POWERPC_64LE: # %bb.0: # %entry +; POWERPC_64LE-NEXT: lfd 0, 0(3) +; POWERPC_64LE-NEXT: xxlxor 35, 35, 35 +; POWERPC_64LE-NEXT: li 4, 0 +; POWERPC_64LE-NEXT: li 3, 4 +; POWERPC_64LE-NEXT: xxswapd 34, 0 +; POWERPC_64LE-NEXT: vcmpequh 2, 2, 3 +; POWERPC_64LE-NEXT: xxlnor 34, 34, 34 +; POWERPC_64LE-NEXT: vmrglh 3, 2, 2 +; POWERPC_64LE-NEXT: vextuwrx 4, 4, 2 +; POWERPC_64LE-NEXT: vextuwrx 3, 3, 3 +; POWERPC_64LE-NEXT: clrlwi 4, 4, 31 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 1, 30, 30 +; POWERPC_64LE-NEXT: mfvsrwz 3, 35 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 2, 29, 29 +; POWERPC_64LE-NEXT: li 3, 12 +; POWERPC_64LE-NEXT: vextuwrx 3, 3, 3 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 3, 28, 28 +; POWERPC_64LE-NEXT: stb 4, -1(1) +; POWERPC_64LE-NEXT: lbz 3, -1(1) +; POWERPC_64LE-NEXT: popcntd 3, 3 +; POWERPC_64LE-NEXT: blr ; ; POWERPC_64-LABEL: test_Greater_than: -; POWERPC_64: L..BB0_6: # %vector.body -; POWERPC_64-NEXT: # -; POWERPC_64-NEXT: lxv [[R1:[0-9]+]], -64(4) -; POWERPC_64-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_64-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_64-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_64-NEXT: vmrghh [[R2]], [[R2]], [[R2]] -; POWERPC_64-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_64-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_64-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_64: L..BB0_10: # %vec.epilog.vector.body -; POWERPC_64-NEXT: # -; POWERPC_64-NEXT: lxv [[R8:[0-9]+]], 0(4) -; POWERPC_64-NEXT: addi 4, 4, 16 -; POWERPC_64-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]] -; POWERPC_64-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_64-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_64-NEXT: vmrglh [[R9]], [[R9]], [[R9]] -; POWERPC_64-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_64-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_64-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_64-NEXT: vadduwm [[R3]], [[R3]], [[R11]] -; POWERPC_64-NEXT: bdnz L..BB0_10 -; POWERPC_64: blr +; POWERPC_64: # %bb.0: # %entry +; POWERPC_64-NEXT: lxsd 2, 0(3) +; POWERPC_64-NEXT: xxlxor 35, 35, 35 +; POWERPC_64-NEXT: li 4, 12 +; POWERPC_64-NEXT: li 3, 8 +; POWERPC_64-NEXT: vcmpequh 2, 2, 3 +; POWERPC_64-NEXT: xxlnor 34, 34, 34 +; POWERPC_64-NEXT: vmrghh 2, 2, 2 +; POWERPC_64-NEXT: vextuwlx 4, 4, 2 +; 
POWERPC_64-NEXT: vextuwlx 3, 3, 2 +; POWERPC_64-NEXT: clrlwi 4, 4, 31 +; POWERPC_64-NEXT: rlwimi 4, 3, 1, 30, 30 +; POWERPC_64-NEXT: mfvsrwz 3, 34 +; POWERPC_64-NEXT: rlwimi 4, 3, 2, 29, 29 +; POWERPC_64-NEXT: li 3, 0 +; POWERPC_64-NEXT: vextuwlx 3, 3, 2 +; POWERPC_64-NEXT: rlwimi 4, 3, 3, 28, 28 +; POWERPC_64-NEXT: stb 4, -1(1) +; POWERPC_64-NEXT: lbz 3, -1(1) +; POWERPC_64-NEXT: popcntd 3, 3 +; POWERPC_64-NEXT: blr ; ; POWERPC_32-LABEL: test_Greater_than: -; POWERPC_32: L..BB0_7: # %vector.body -; POWERPC_32-NEXT: # -; POWERPC_32-NEXT: lxv [[R1:[0-9]+]], 0(10) -; POWERPC_32-NEXT: addic [[R13:[0-9]+]], [[R13]], 64 -; POWERPC_32-NEXT: addze [[R14:[0-9]+]], [[R14]] -; POWERPC_32-NEXT: xor [[R15:[0-9]+]], [[R13]], [[R16:[0-9]+]] -; POWERPC_32-NEXT: or. [[R15]], [[R15]], [[R14]] -; POWERPC_32-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_32-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_32-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_32-NEXT: vmrghh [[R2]], [[R2]], [[R2]] -; POWERPC_32-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_32-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_32-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_32: L..BB0_11: # %vec.epilog.vector.body -; POWERPC_32-NEXT: # -; POWERPC_32-NEXT: slwi [[R14]], [[R13]], 1 -; POWERPC_32-NEXT: addic [[R13]], [[R13]], 8 -; POWERPC_32-NEXT: addze [[R17:[0-9]+]], [[R17]] -; POWERPC_32-NEXT: lxvx [[R8:[0-9]+]], [[R18:[0-9]+]], [[R14]] -; POWERPC_32-NEXT: xor [[R14]], [[R13]], [[R16]] -; POWERPC_32-NEXT: or. [[R14]], [[R14]], [[R17]] -; POWERPC_32-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R3]] -; POWERPC_32-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_32-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_32-NEXT: vmrglh [[R9]], [[R9]], [[R9]] -; POWERPC_32-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_32-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_32-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_32-NEXT: vadduwm [[R19:[0-9]+]], [[R19]], [[R11]] -; POWERPC_32-NEXT: bne 0, L..BB0_11 -; POWERPC_32: blr - entry: - %cmp5 = icmp sgt i32 %ncols, 0 - br i1 %cmp5, label %iter.check, label %for.cond.cleanup - -iter.check: ; preds = %entry - %wide.trip.count = zext nneg i32 %ncols to i64 - %min.iters.check = icmp ult i32 %ncols, 8 - br i1 %min.iters.check, label %for.body.preheader, label %vector.main.loop.iter.check - -for.body.preheader: ; preds = %vec.epilog.iter.check, %vec.epilog.middle.block, %iter.check - %indvars.iv.ph = phi i64 [ 0, %iter.check ], [ %n.vec, %vec.epilog.iter.check ], [ %n.vec31, %vec.epilog.middle.block ] - %num_cols_needed.06.ph = phi i32 [ 0, %iter.check ], [ %33, %vec.epilog.iter.check ], [ %40, %vec.epilog.middle.block ] - br label %for.body - -vector.main.loop.iter.check: ; preds = %iter.check - %min.iters.check9 = icmp ult i32 %ncols, 64 - br i1 %min.iters.check9, label %vec.epilog.ph, label %vector.ph - -vector.ph: ; preds = %vector.main.loop.iter.check - %n.vec = and i64 %wide.trip.count, 2147483584 - br label %vector.body - -vector.body: ; preds = %vector.body, %vector.ph - %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] - %vec.phi = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %24, %vector.body ] - %vec.phi10 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %25, %vector.body ] - %vec.phi11 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %26, %vector.body ] - %vec.phi12 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %27, %vector.body ] - %vec.phi13 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %28, %vector.body 
] - %vec.phi14 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %29, %vector.body ] - %vec.phi15 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %30, %vector.body ] - %vec.phi16 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %31, %vector.body ] - %0 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index - %1 = getelementptr inbounds nuw i8, ptr %0, i64 16 - %2 = getelementptr inbounds nuw i8, ptr %0, i64 32 - %3 = getelementptr inbounds nuw i8, ptr %0, i64 48 - %4 = getelementptr inbounds nuw i8, ptr %0, i64 64 - %5 = getelementptr inbounds nuw i8, ptr %0, i64 80 - %6 = getelementptr inbounds nuw i8, ptr %0, i64 96 - %7 = getelementptr inbounds nuw i8, ptr %0, i64 112 - %wide.load = load <8 x i16>, ptr %0, align 2, !tbaa !5 - %wide.load17 = load <8 x i16>, ptr %1, align 2, !tbaa !5 - %wide.load18 = load <8 x i16>, ptr %2, align 2, !tbaa !5 - %wide.load19 = load <8 x i16>, ptr %3, align 2, !tbaa !5 - %wide.load20 = load <8 x i16>, ptr %4, align 2, !tbaa !5 - %wide.load21 = load <8 x i16>, ptr %5, align 2, !tbaa !5 - %wide.load22 = load <8 x i16>, ptr %6, align 2, !tbaa !5 - %wide.load23 = load <8 x i16>, ptr %7, align 2, !tbaa !5 - %8 = icmp ne <8 x i16> %wide.load, zeroinitializer - %9 = icmp ne <8 x i16> %wide.load17, zeroinitializer - %10 = icmp ne <8 x i16> %wide.load18, zeroinitializer - %11 = icmp ne <8 x i16> %wide.load19, zeroinitializer - %12 = icmp ne <8 x i16> %wide.load20, zeroinitializer - %13 = icmp ne <8 x i16> %wide.load21, zeroinitializer - %14 = icmp ne <8 x i16> %wide.load22, zeroinitializer - %15 = icmp ne <8 x i16> %wide.load23, zeroinitializer - %16 = zext <8 x i1> %8 to <8 x i32> - %17 = zext <8 x i1> %9 to <8 x i32> - %18 = zext <8 x i1> %10 to <8 x i32> - %19 = zext <8 x i1> %11 to <8 x i32> - %20 = zext <8 x i1> %12 to <8 x i32> - %21 = zext <8 x i1> %13 to <8 x i32> - %22 = zext <8 x i1> %14 to <8 x i32> - %23 = zext <8 x i1> %15 to <8 x i32> - %24 = add <8 x i32> %vec.phi, %16 - %25 = add <8 x i32> %vec.phi10, %17 - %26 = add <8 x i32> %vec.phi11, %18 - %27 = add <8 x i32> %vec.phi12, %19 - %28 = add <8 x i32> %vec.phi13, %20 - %29 = add <8 x i32> %vec.phi14, %21 - %30 = add <8 x i32> %vec.phi15, %22 - %31 = add <8 x i32> %vec.phi16, %23 - %index.next = add nuw i64 %index, 64 - %32 = icmp eq i64 %index.next, %n.vec - br i1 %32, label %middle.block, label %vector.body, !llvm.loop !9 - -middle.block: ; preds = %vector.body - %bin.rdx = add <8 x i32> %25, %24 - %bin.rdx24 = add <8 x i32> %26, %bin.rdx - %bin.rdx25 = add <8 x i32> %27, %bin.rdx24 - %bin.rdx26 = add <8 x i32> %28, %bin.rdx25 - %bin.rdx27 = add <8 x i32> %29, %bin.rdx26 - %bin.rdx28 = add <8 x i32> %30, %bin.rdx27 - %bin.rdx29 = add <8 x i32> %31, %bin.rdx28 - %33 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %bin.rdx29) - %cmp.n = icmp eq i64 %n.vec, %wide.trip.count - br i1 %cmp.n, label %for.cond.cleanup, label %vec.epilog.iter.check - -vec.epilog.iter.check: ; preds = %middle.block - %n.vec.remaining = and i64 %wide.trip.count, 56 - %min.epilog.iters.check = icmp eq i64 %n.vec.remaining, 0 - br i1 %min.epilog.iters.check, label %for.body.preheader, label %vec.epilog.ph - -vec.epilog.ph: ; preds = %vec.epilog.iter.check, %vector.main.loop.iter.check - %vec.epilog.resume.val = phi i64 [ %n.vec, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ] - %bc.merge.rdx = phi i32 [ %33, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ] - %n.vec31 = and i64 %wide.trip.count, 2147483640 - %34 = insertelement <8 x i32> <i32 poison, i32 0, i32 0, i32 0, i32 0, i32 0, 
i32 0, i32 0>, i32 %bc.merge.rdx, i64 0 - br label %vec.epilog.vector.body - -vec.epilog.vector.body: ; preds = %vec.epilog.vector.body, %vec.epilog.ph - %index32 = phi i64 [ %vec.epilog.resume.val, %vec.epilog.ph ], [ %index.next35, %vec.epilog.vector.body ] - %vec.phi33 = phi <8 x i32> [ %34, %vec.epilog.ph ], [ %38, %vec.epilog.vector.body ] - %35 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index32 - %wide.load34 = load <8 x i16>, ptr %35, align 2, !tbaa !5 - %36 = icmp ne <8 x i16> %wide.load34, zeroinitializer - %37 = zext <8 x i1> %36 to <8 x i32> - %38 = add <8 x i32> %vec.phi33, %37 - %index.next35 = add nuw i64 %index32, 8 - %39 = icmp eq i64 %index.next35, %n.vec31 - br i1 %39, label %vec.epilog.middle.block, label %vec.epilog.vector.body, !llvm.loop !13 - -vec.epilog.middle.block: ; preds = %vec.epilog.vector.body - %40 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %38) - %cmp.n36 = icmp eq i64 %n.vec31, %wide.trip.count - br i1 %cmp.n36, label %for.cond.cleanup, label %for.body.preheader - -for.cond.cleanup: ; preds = %for.body, %middle.block, %vec.epilog.middle.block, %entry - %num_cols_needed.0.lcssa = phi i32 [ 0, %entry ], [ %33, %middle.block ], [ %40, %vec.epilog.middle.block ], [ %spec.select, %for.body ] - ret i32 %num_cols_needed.0.lcssa - -for.body: ; preds = %for.body.preheader, %for.body - %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ] - %num_cols_needed.06 = phi i32 [ %spec.select, %for.body ], [ %num_cols_needed.06.ph, %for.body.preheader ] - %arrayidx = getelementptr inbounds nuw i16, ptr %colauths, i64 %indvars.iv - %41 = load i16, ptr %arrayidx, align 2, !tbaa !5 - %tobool.not = icmp ne i16 %41, 0 - %inc = zext i1 %tobool.not to i32 - %spec.select = add nuw nsw i32 %num_cols_needed.06, %inc - %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 - %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count - br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !14 +; POWERPC_32: # %bb.0: # %entry +; POWERPC_32-NEXT: li 4, 4 +; POWERPC_32-NEXT: lxvwsx 1, 0, 3 +; POWERPC_32-NEXT: xxlxor 35, 35, 35 +; POWERPC_32-NEXT: lxvwsx 0, 3, 4 +; POWERPC_32-NEXT: xxmrghw 34, 1, 0 +; POWERPC_32-NEXT: vcmpequh 2, 2, 3 +; POWERPC_32-NEXT: xxlnor 34, 34, 34 +; POWERPC_32-NEXT: vmrghh 2, 2, 2 +; POWERPC_32-NEXT: stxv 34, -32(1) +; POWERPC_32-NEXT: lwz 3, -20(1) +; POWERPC_32-NEXT: lwz 4, -24(1) +; POWERPC_32-NEXT: clrlwi 3, 3, 31 +; POWERPC_32-NEXT: rlwimi 3, 4, 1, 30, 30 +; POWERPC_32-NEXT: lwz 4, -28(1) +; POWERPC_32-NEXT: rlwimi 3, 4, 2, 29, 29 +; POWERPC_32-NEXT: lwz 4, -32(1) +; POWERPC_32-NEXT: rlwimi 3, 4, 3, 28, 28 +; POWERPC_32-NEXT: popcntw 3, 3 +; POWERPC_32-NEXT: blr +entry: + %0 = load <4 x i16>, ptr %colauths, align 2, !tbaa !5 + %1 = icmp ne <4 x i16> %0, zeroinitializer + %2 = bitcast <4 x i1> %1 to i4 + %3 = tail call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 %2) + %4 = zext nneg i4 %3 to i32 + ret i32 %4 } +declare i4 @llvm.ctpop.i4(i4) #1 + !5 = !{!6, !6, i64 0} !6 = !{!"short", !7, i64 0} !7 = !{!"omnipotent char", !8, i64 0} !8 = !{!"Simple C/C++ TBAA"} -!9 = distinct !{!9, !10, !11, !12} -!10 = !{!"llvm.loop.mustprogress"} -!11 = !{!"llvm.loop.isvectorized", i32 1} -!12 = !{!"llvm.loop.unroll.runtime.disable"} -!13 = distinct !{!13, !10, !11, !12} -!14 = distinct !{!14, !10, !12, !11} diff --git a/llvm/test/CodeGen/PowerPC/combine-fneg.ll b/llvm/test/CodeGen/PowerPC/combine-fneg.ll index a72abf7..04af094 100644 --- a/llvm/test/CodeGen/PowerPC/combine-fneg.ll +++ 
b/llvm/test/CodeGen/PowerPC/combine-fneg.ll @@ -6,7 +6,6 @@ define <4 x double> @fneg_fdiv_splat(double %a0, <4 x double> %a1) { ; CHECK-LABEL: fneg_fdiv_splat: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxspltd 0, 1, 0 ; CHECK-NEXT: addi 3, 3, .LCPI0_0@toc@l ; CHECK-NEXT: xvredp 1, 0 diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll index 4519cf4..eac4fb6 100644 --- a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll +++ b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll @@ -229,7 +229,6 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp ; P8-NEXT: xscvspdpn f1, vs0 ; P8-NEXT: bl nearbyintf ; P8-NEXT: nop -; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8-NEXT: xxmrghd vs0, vs1, v30 ; P8-NEXT: xscvspdpn f1, v31 ; P8-NEXT: xvcvdpsp v29, vs0 @@ -240,7 +239,6 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp ; P8-NEXT: xscvspdpn f1, vs0 ; P8-NEXT: bl nearbyintf ; P8-NEXT: nop -; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8-NEXT: xxmrghd vs0, v30, vs1 ; P8-NEXT: li r3, 160 ; P8-NEXT: xvcvdpsp v2, vs0 @@ -278,7 +276,6 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp ; P9-NEXT: xscvspdpn f1, vs0 ; P9-NEXT: bl nearbyintf ; P9-NEXT: nop -; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9-NEXT: xxmrghd vs0, vs1, v30 ; P9-NEXT: xscvspdpn f1, v31 ; P9-NEXT: xvcvdpsp v29, vs0 @@ -289,7 +286,6 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) strictfp ; P9-NEXT: xscvspdpn f1, vs0 ; P9-NEXT: bl nearbyintf ; P9-NEXT: nop -; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9-NEXT: xxmrghd vs0, v30, vs1 ; P9-NEXT: lxv v31, 64(r1) # 16-byte Folded Reload ; P9-NEXT: lxv v30, 48(r1) # 16-byte Folded Reload @@ -330,7 +326,6 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) stric ; P8-NEXT: bl nearbyint ; P8-NEXT: nop ; P8-NEXT: li r3, 144 -; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8-NEXT: xxmrghd v2, v30, vs1 ; P8-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload ; P8-NEXT: li r3, 128 @@ -359,7 +354,6 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) stric ; P9-NEXT: xxswapd vs1, v31 ; P9-NEXT: bl nearbyint ; P9-NEXT: nop -; P9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P9-NEXT: xxmrghd v2, v30, vs1 ; P9-NEXT: lxv v31, 48(r1) # 16-byte Folded Reload ; P9-NEXT: lxv v30, 32(r1) # 16-byte Folded Reload diff --git a/llvm/test/CodeGen/PowerPC/frem.ll b/llvm/test/CodeGen/PowerPC/frem.ll index 21cb206..19b4b1c 100644 --- a/llvm/test/CodeGen/PowerPC/frem.ll +++ b/llvm/test/CodeGen/PowerPC/frem.ll @@ -70,7 +70,6 @@ define <4 x float> @frem4x32(<4 x float> %a, <4 x float> %b) { ; CHECK-NEXT: xscvspdpn 2, 0 ; CHECK-NEXT: bl fmodf ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxmrghd 0, 1, 61 ; CHECK-NEXT: xscvspdpn 1, 62 ; CHECK-NEXT: xscvspdpn 2, 63 @@ -84,7 +83,6 @@ define <4 x float> @frem4x32(<4 x float> %a, <4 x float> %b) { ; CHECK-NEXT: xscvspdpn 2, 0 ; CHECK-NEXT: bl fmodf ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxmrghd 0, 61, 1 ; CHECK-NEXT: lxv 63, 80(1) # 16-byte Folded Reload ; CHECK-NEXT: lxv 62, 64(1) # 16-byte Folded Reload @@ -126,7 +124,6 @@ define <2 x double> @frem2x64(<2 x double> %a, <2 x double> %b) { ; CHECK-NEXT: xxswapd 2, 63 ; CHECK-NEXT: bl fmod ; CHECK-NEXT: nop -; CHECK-NEXT: 
# kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxmrghd 34, 61, 1 ; CHECK-NEXT: lxv 63, 64(1) # 16-byte Folded Reload ; CHECK-NEXT: lxv 62, 48(1) # 16-byte Folded Reload diff --git a/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll b/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll index 3ae0b02..238e200 100644 --- a/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll +++ b/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll @@ -41,47 +41,39 @@ define void @test(ptr %p1, ptr %p2) nounwind { ; CHECK-NEXT: xxswapd 61, 63 ; CHECK-NEXT: bl roundeven ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxswapd 56, 1 ; CHECK-NEXT: xxlor 1, 59, 59 ; CHECK-NEXT: bl roundeven ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxswapd 0, 1 ; CHECK-NEXT: xxlor 1, 60, 60 ; CHECK-NEXT: xxmrgld 59, 0, 56 ; CHECK-NEXT: bl roundeven ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxswapd 60, 1 ; CHECK-NEXT: xxlor 1, 62, 62 ; CHECK-NEXT: bl roundeven ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxswapd 0, 1 ; CHECK-NEXT: xxlor 1, 61, 61 ; CHECK-NEXT: xxmrgld 62, 0, 60 ; CHECK-NEXT: bl roundeven ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxswapd 61, 1 ; CHECK-NEXT: xxlor 1, 63, 63 ; CHECK-NEXT: bl roundeven ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxswapd 0, 1 ; CHECK-NEXT: xxlor 1, 57, 57 ; CHECK-NEXT: xxmrgld 63, 0, 61 ; CHECK-NEXT: bl roundeven ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxswapd 61, 1 ; CHECK-NEXT: xxlor 1, 58, 58 ; CHECK-NEXT: bl roundeven ; CHECK-NEXT: nop ; CHECK-NEXT: li 3, 160 ; CHECK-NEXT: stxvd2x 63, 30, 29 -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxswapd 0, 1 ; CHECK-NEXT: stxvd2x 62, 30, 28 ; CHECK-NEXT: stxvd2x 59, 30, 27 diff --git a/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll b/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll index b83ac4a..50f05cc 100644 --- a/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll +++ b/llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll @@ -666,7 +666,6 @@ define <4 x float> @test_extend32_vec4(ptr %p) #0 { ; P8-NEXT: bl __extendhfsf2 ; P8-NEXT: nop ; P8-NEXT: li r3, 80 -; P8-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; P8-NEXT: xxmrghd vs0, vs61, vs1 ; P8-NEXT: xxmrghd vs1, vs63, vs62 ; P8-NEXT: ld r30, 96(r1) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/PowerPC/ldexp.ll b/llvm/test/CodeGen/PowerPC/ldexp.ll index 23748bc..8d7253b 100644 --- a/llvm/test/CodeGen/PowerPC/ldexp.ll +++ b/llvm/test/CodeGen/PowerPC/ldexp.ll @@ -107,7 +107,6 @@ define <4 x float> @ldexp_v4f32(<4 x float> %val, <4 x i32> %exp) nounwind { ; CHECK-NEXT: extsw r4, r3 ; CHECK-NEXT: bl ldexpf ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxmrghd vs0, vs1, v29 ; CHECK-NEXT: li r3, 0 ; CHECK-NEXT: vextuwrx r3, r3, v31 @@ -124,7 +123,6 @@ define <4 x float> @ldexp_v4f32(<4 x float> %val, <4 x i32> %exp) nounwind { ; CHECK-NEXT: xscvspdpn f1, vs0 ; CHECK-NEXT: bl ldexpf ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxmrghd vs0, vs1, v29 ; CHECK-NEXT: lxv v31, 80(r1) # 16-byte Folded Reload ; CHECK-NEXT: lxv v30, 64(r1) # 16-byte Folded Reload diff --git a/llvm/test/CodeGen/PowerPC/llvm.modf.ll b/llvm/test/CodeGen/PowerPC/llvm.modf.ll index 203b3bd..1b137c7 100644 --- 
a/llvm/test/CodeGen/PowerPC/llvm.modf.ll +++ b/llvm/test/CodeGen/PowerPC/llvm.modf.ll @@ -294,7 +294,6 @@ define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) { ; CHECK-NEXT: addi r4, r1, 40 ; CHECK-NEXT: bl modf ; CHECK-NEXT: nop -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxmrghd v2, v30, vs1 ; CHECK-NEXT: lfd f0, 32(r1) ; CHECK-NEXT: lfd f1, 40(r1) diff --git a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll index 232014d..a9503f7 100644 --- a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll +++ b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll @@ -2,22 +2,87 @@ ; Verify whether the generated assembly for the following function includes the mtvsrbmi instruction. ; vector unsigned char v00FF() ; { -; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 }; -; return x; +; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 }; +; return x; +; } +; vector unsigned short short00FF() +; { +; vector unsigned short x = { 0xFF, 0,0,0, 0,0,0,0}; +; return x; +; } +; vector unsigned int int00FF() +; { +; vector unsigned int x = { 0xFF, 0,0,0}; +; return x; +; } +; vector unsigned long long longlong00FF() +; { +; vector unsigned long long x = { 0xFF, 0}; +; return x; ; } ; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc-ibm-aix -mcpu=pwr10 -verify-machineinstrs \ -; RUN: | FileCheck %s --check-prefix=CHECK +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-BE + +; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr10 -verify-machineinstrs \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-LE + +; CHECK-NOT: .byte 255 +; CHECK-NOT: .byte 0 define dso_local noundef range(i8 -1, 1) <16 x i8> @_Z5v00FFv() { -; CHECK-NOT: L..CPI0_0: -; CHECK-NOT: .byte 255 # 0xff -; CHECK-NOT: .byte 0 # 0x0 - -; CHECK-LABEL: _Z5v00FFv: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: mtvsrbmi v2, 1 -; CHECK-NEXT: blr +; CHECK-BE-LABEL: _Z5v00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 32768 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z5v00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr + entry: ret <16 x i8> <i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0> } + +define dso_local noundef range(i16 0, 256) <8 x i16> @_Z9short00FFv() { +; CHECK-BE-LABEL: _Z9short00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 16384 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z9short00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <8 x i16> <i16 255, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0> +} + +define dso_local noundef range(i32 0, 256) <4 x i32> @_Z7int00FFv() { +; CHECK-BE-LABEL: _Z7int00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 4096 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z7int00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <4 x i32> <i32 255, i32 0, i32 0, i32 0> +} + +define dso_local noundef range(i64 0, 256) <2 x i64> @_Z12longlong00FFv() { +; CHECK-BE-LABEL: _Z12longlong00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 256 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z12longlong00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <2 x i64> <i64 255, i64 0> +} diff --git a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll 
b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll index b98aed8..291a9c1 100644 --- a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll +++ b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll @@ -940,25 +940,21 @@ entry: define <2 x double> @testDoubleImm1(<2 x double> %a, double %b) { ; CHECK-LABEL: testDoubleImm1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-NEXT: xxmrghd v2, v2, vs1 ; CHECK-NEXT: blr ; ; CHECK-BE-LABEL: testDoubleImm1: ; CHECK-BE: # %bb.0: # %entry -; CHECK-BE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-BE-NEXT: xxpermdi v2, vs1, v2, 1 ; CHECK-BE-NEXT: blr ; ; CHECK-P9-LABEL: testDoubleImm1: ; CHECK-P9: # %bb.0: # %entry -; CHECK-P9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; CHECK-P9-NEXT: xxpermdi v2, vs1, v2, 1 ; CHECK-P9-NEXT: blr ; ; AIX-P8-LABEL: testDoubleImm1: ; AIX-P8: # %bb.0: # %entry -; AIX-P8-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; AIX-P8-NEXT: xxpermdi v2, vs1, v2, 1 ; AIX-P8-NEXT: blr entry: diff --git a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll index 5dac21b..71c3069 100644 --- a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll +++ b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll @@ -107,10 +107,6 @@ entry: define <3 x double> @constrained_vector_fdiv_v3f64(<3 x double> %x, <3 x double> %y) #0 { ; PC64LE-LABEL: constrained_vector_fdiv_v3f64: ; PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 5, 4 ; PC64LE-NEXT: xxmrghd 1, 2, 1 ; PC64LE-NEXT: xsdivdp 3, 3, 6 @@ -120,10 +116,6 @@ define <3 x double> @constrained_vector_fdiv_v3f64(<3 x double> %x, <3 x double> ; ; PC64LE9-LABEL: constrained_vector_fdiv_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 5, 4 ; PC64LE9-NEXT: xxmrghd 1, 2, 1 ; PC64LE9-NEXT: xsdivdp 3, 3, 6 @@ -217,7 +209,6 @@ define <2 x double> @constrained_vector_frem_v2f64(<2 x double> %x, <2 x double> ; PC64LE-NEXT: bl fmod ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -248,7 +239,6 @@ define <2 x double> @constrained_vector_frem_v2f64(<2 x double> %x, <2 x double> ; PC64LE9-NEXT: xxswapd 2, 63 ; PC64LE9-NEXT: bl fmod ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 61, 1 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload @@ -400,7 +390,6 @@ define <3 x double> @constrained_vector_frem_v3f64(<3 x double> %x, <3 x double> ; PC64LE-NEXT: fmr 2, 30 ; PC64LE-NEXT: bl fmod ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 29 ; PC64LE-NEXT: fmr 2, 31 @@ -442,7 +431,6 @@ define <3 x double> @constrained_vector_frem_v3f64(<3 x double> %x, <3 x double> ; PC64LE9-NEXT: fmr 2, 30 ; PC64LE9-NEXT: bl fmod ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; 
PC64LE9-NEXT: fmr 1, 29 ; PC64LE9-NEXT: fmr 2, 31 @@ -498,7 +486,6 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double> ; PC64LE-NEXT: xxswapd 2, 62 ; PC64LE-NEXT: bl fmod ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 59, 1 ; PC64LE-NEXT: xxlor 1, 61, 61 ; PC64LE-NEXT: xxlor 2, 63, 63 @@ -511,7 +498,6 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double> ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 112 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 60, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 96 @@ -550,7 +536,6 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double> ; PC64LE9-NEXT: xxswapd 2, 62 ; PC64LE9-NEXT: bl fmod ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 59, 1 ; PC64LE9-NEXT: xscpsgndp 1, 61, 61 ; PC64LE9-NEXT: xscpsgndp 2, 63, 63 @@ -561,7 +546,6 @@ define <4 x double> @constrained_vector_frem_v4f64(<4 x double> %x, <4 x double> ; PC64LE9-NEXT: xxswapd 2, 63 ; PC64LE9-NEXT: bl fmod ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 60, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload @@ -686,10 +670,6 @@ entry: define <3 x double> @constrained_vector_fmul_v3f64(<3 x double> %x, <3 x double> %y) #0 { ; PC64LE-LABEL: constrained_vector_fmul_v3f64: ; PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 5, 4 ; PC64LE-NEXT: xxmrghd 1, 2, 1 ; PC64LE-NEXT: xsmuldp 3, 3, 6 @@ -699,10 +679,6 @@ define <3 x double> @constrained_vector_fmul_v3f64(<3 x double> %x, <3 x double> ; ; PC64LE9-LABEL: constrained_vector_fmul_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 5, 4 ; PC64LE9-NEXT: xxmrghd 1, 2, 1 ; PC64LE9-NEXT: xsmuldp 3, 3, 6 @@ -844,10 +820,6 @@ entry: define <3 x double> @constrained_vector_fadd_v3f64(<3 x double> %x, <3 x double> %y) #0 { ; PC64LE-LABEL: constrained_vector_fadd_v3f64: ; PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 5, 4 ; PC64LE-NEXT: xxmrghd 1, 2, 1 ; PC64LE-NEXT: xsadddp 3, 3, 6 @@ -857,10 +829,6 @@ define <3 x double> @constrained_vector_fadd_v3f64(<3 x double> %x, <3 x double> ; ; PC64LE9-LABEL: constrained_vector_fadd_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 5, 4 ; PC64LE9-NEXT: xxmrghd 1, 2, 1 ; PC64LE9-NEXT: xsadddp 3, 3, 6 @@ -1002,10 +970,6 @@ entry: define <3 x double> @constrained_vector_fsub_v3f64(<3 x double> %x, <3 x double> %y) #0 { ; PC64LE-LABEL: constrained_vector_fsub_v3f64: ; PC64LE: # %bb.0: # 
%entry -; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 5, 4 ; PC64LE-NEXT: xxmrghd 1, 2, 1 ; PC64LE-NEXT: xssubdp 3, 3, 6 @@ -1015,10 +979,6 @@ define <3 x double> @constrained_vector_fsub_v3f64(<3 x double> %x, <3 x double> ; ; PC64LE9-LABEL: constrained_vector_fsub_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 5, 4 ; PC64LE9-NEXT: xxmrghd 1, 2, 1 ; PC64LE9-NEXT: xssubdp 3, 3, 6 @@ -1145,8 +1105,6 @@ entry: define <3 x double> @constrained_vector_sqrt_v3f64(<3 x double> %x) #0 { ; PC64LE-LABEL: constrained_vector_sqrt_v3f64: ; PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 2, 1 ; PC64LE-NEXT: xssqrtdp 3, 3 ; PC64LE-NEXT: xvsqrtdp 2, 0 @@ -1155,8 +1113,6 @@ define <3 x double> @constrained_vector_sqrt_v3f64(<3 x double> %x) #0 { ; ; PC64LE9-LABEL: constrained_vector_sqrt_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 2, 1 ; PC64LE9-NEXT: xssqrtdp 3, 3 ; PC64LE9-NEXT: xvsqrtdp 2, 0 @@ -1247,7 +1203,6 @@ define <2 x double> @constrained_vector_pow_v2f64(<2 x double> %x, <2 x double> ; PC64LE-NEXT: bl pow ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -1278,7 +1233,6 @@ define <2 x double> @constrained_vector_pow_v2f64(<2 x double> %x, <2 x double> ; PC64LE9-NEXT: xxswapd 2, 63 ; PC64LE9-NEXT: bl pow ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 61, 1 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload @@ -1430,7 +1384,6 @@ define <3 x double> @constrained_vector_pow_v3f64(<3 x double> %x, <3 x double> ; PC64LE-NEXT: fmr 2, 30 ; PC64LE-NEXT: bl pow ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 29 ; PC64LE-NEXT: fmr 2, 31 @@ -1472,7 +1425,6 @@ define <3 x double> @constrained_vector_pow_v3f64(<3 x double> %x, <3 x double> ; PC64LE9-NEXT: fmr 2, 30 ; PC64LE9-NEXT: bl pow ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 29 ; PC64LE9-NEXT: fmr 2, 31 @@ -1528,7 +1480,6 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double> ; PC64LE-NEXT: xxswapd 2, 62 ; PC64LE-NEXT: bl pow ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 59, 1 ; PC64LE-NEXT: xxlor 1, 61, 61 ; PC64LE-NEXT: xxlor 2, 63, 63 @@ -1541,7 +1492,6 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double> ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 112 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 60, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 96 @@ -1580,7 +1530,6 @@ define 
<4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double> ; PC64LE9-NEXT: xxswapd 2, 62 ; PC64LE9-NEXT: bl pow ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 59, 1 ; PC64LE9-NEXT: xscpsgndp 1, 61, 61 ; PC64LE9-NEXT: xscpsgndp 2, 63, 63 @@ -1591,7 +1540,6 @@ define <4 x double> @constrained_vector_pow_v4f64(<4 x double> %x, <4 x double> ; PC64LE9-NEXT: xxswapd 2, 63 ; PC64LE9-NEXT: bl pow ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 60, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload @@ -1670,7 +1618,6 @@ define <2 x double> @constrained_vector_powi_v2f64(<2 x double> %x, i32 %y) #0 { ; PC64LE-NEXT: bl __powidf2 ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: ld 30, 80(1) # 8-byte Folded Reload ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload @@ -1700,7 +1647,6 @@ define <2 x double> @constrained_vector_powi_v2f64(<2 x double> %x, i32 %y) #0 { ; PC64LE9-NEXT: mr 4, 30 ; PC64LE9-NEXT: bl __powidf2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -1844,7 +1790,6 @@ define <3 x double> @constrained_vector_powi_v3f64(<3 x double> %x, i32 %y) #0 { ; PC64LE-NEXT: mr 4, 30 ; PC64LE-NEXT: bl __powidf2 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: mr 4, 30 @@ -1883,7 +1828,6 @@ define <3 x double> @constrained_vector_powi_v3f64(<3 x double> %x, i32 %y) #0 { ; PC64LE9-NEXT: mr 4, 30 ; PC64LE9-NEXT: bl __powidf2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: mr 4, 30 @@ -1934,7 +1878,6 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 { ; PC64LE-NEXT: mr 4, 30 ; PC64LE-NEXT: bl __powidf2 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: mr 4, 30 @@ -1947,7 +1890,6 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: ld 30, 96(1) # 8-byte Folded Reload ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload @@ -1981,7 +1923,6 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 { ; PC64LE9-NEXT: mr 4, 30 ; PC64LE9-NEXT: bl __powidf2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: mr 4, 30 @@ -1992,7 +1933,6 @@ define <4 x double> @constrained_vector_powi_v4f64(<4 x double> %x, i32 %y) #0 { ; PC64LE9-NEXT: mr 4, 30 ; PC64LE9-NEXT: bl __powidf2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -2063,7 +2003,6 @@ define <2 x double> @constrained_vector_sin_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl sin ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 
killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -2088,7 +2027,6 @@ define <2 x double> @constrained_vector_sin_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl sin ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -2211,7 +2149,6 @@ define <3 x double> @constrained_vector_sin_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl sin ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl sin @@ -2244,7 +2181,6 @@ define <3 x double> @constrained_vector_sin_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl sin ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl sin @@ -2288,7 +2224,6 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl sin ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: bl sin @@ -2299,7 +2234,6 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -2328,7 +2262,6 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl sin ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl sin @@ -2337,7 +2270,6 @@ define <4 x double> @constrained_vector_sin_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl sin ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -2406,7 +2338,6 @@ define <2 x double> @constrained_vector_cos_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl cos ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -2431,7 +2362,6 @@ define <2 x double> @constrained_vector_cos_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl cos ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -2554,7 +2484,6 @@ define <3 x double> @constrained_vector_cos_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl cos ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl cos @@ -2587,7 +2516,6 @@ define <3 x double> @constrained_vector_cos_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl cos ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 
def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl cos @@ -2631,7 +2559,6 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl cos ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: bl cos @@ -2642,7 +2569,6 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -2671,7 +2597,6 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl cos ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl cos @@ -2680,7 +2605,6 @@ define <4 x double> @constrained_vector_cos_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl cos ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -2749,7 +2673,6 @@ define <2 x double> @constrained_vector_exp_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl exp ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -2774,7 +2697,6 @@ define <2 x double> @constrained_vector_exp_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl exp ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -2897,7 +2819,6 @@ define <3 x double> @constrained_vector_exp_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl exp ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl exp @@ -2930,7 +2851,6 @@ define <3 x double> @constrained_vector_exp_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl exp ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl exp @@ -2974,7 +2894,6 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl exp ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: bl exp @@ -2985,7 +2904,6 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -3014,7 +2932,6 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl exp ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: 
xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl exp @@ -3023,7 +2940,6 @@ define <4 x double> @constrained_vector_exp_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl exp ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -3092,7 +3008,6 @@ define <2 x double> @constrained_vector_exp2_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl exp2 ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -3117,7 +3032,6 @@ define <2 x double> @constrained_vector_exp2_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl exp2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -3240,7 +3154,6 @@ define <3 x double> @constrained_vector_exp2_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl exp2 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl exp2 @@ -3273,7 +3186,6 @@ define <3 x double> @constrained_vector_exp2_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl exp2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl exp2 @@ -3317,7 +3229,6 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl exp2 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: bl exp2 @@ -3328,7 +3239,6 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -3357,7 +3267,6 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl exp2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl exp2 @@ -3366,7 +3275,6 @@ define <4 x double> @constrained_vector_exp2_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl exp2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -3435,7 +3343,6 @@ define <2 x double> @constrained_vector_log_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl log ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -3460,7 +3367,6 @@ define <2 x double> @constrained_vector_log_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl log ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 
63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -3583,7 +3489,6 @@ define <3 x double> @constrained_vector_log_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl log ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl log @@ -3616,7 +3521,6 @@ define <3 x double> @constrained_vector_log_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl log ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl log @@ -3660,7 +3564,6 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl log ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: bl log @@ -3671,7 +3574,6 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -3700,7 +3602,6 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl log ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl log @@ -3709,7 +3610,6 @@ define <4 x double> @constrained_vector_log_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl log ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -3778,7 +3678,6 @@ define <2 x double> @constrained_vector_log10_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl log10 ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -3803,7 +3702,6 @@ define <2 x double> @constrained_vector_log10_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl log10 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -3926,7 +3824,6 @@ define <3 x double> @constrained_vector_log10_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl log10 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl log10 @@ -3959,7 +3856,6 @@ define <3 x double> @constrained_vector_log10_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl log10 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl log10 @@ -4003,7 +3899,6 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl log10 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; 
PC64LE-NEXT: bl log10 @@ -4014,7 +3909,6 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -4043,7 +3937,6 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl log10 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl log10 @@ -4052,7 +3945,6 @@ define <4 x double> @constrained_vector_log10_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl log10 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -4121,7 +4013,6 @@ define <2 x double> @constrained_vector_log2_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl log2 ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -4146,7 +4037,6 @@ define <2 x double> @constrained_vector_log2_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl log2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -4269,7 +4159,6 @@ define <3 x double> @constrained_vector_log2_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl log2 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl log2 @@ -4302,7 +4191,6 @@ define <3 x double> @constrained_vector_log2_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl log2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl log2 @@ -4346,7 +4234,6 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl log2 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: bl log2 @@ -4357,7 +4244,6 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -4386,7 +4272,6 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl log2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl log2 @@ -4395,7 +4280,6 @@ define <4 x double> @constrained_vector_log2_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl log2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 
63, 64(1) # 16-byte Folded Reload @@ -4503,8 +4387,6 @@ define <3 x float> @constrained_vector_rint_v3f32(<3 x float> %x) #0 { define <3 x double> @constrained_vector_rint_v3f64(<3 x double> %x) #0 { ; PC64LE-LABEL: constrained_vector_rint_v3f64: ; PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 2, 1 ; PC64LE-NEXT: xsrdpic 3, 3 ; PC64LE-NEXT: xvrdpic 2, 0 @@ -4513,8 +4395,6 @@ define <3 x double> @constrained_vector_rint_v3f64(<3 x double> %x) #0 { ; ; PC64LE9-LABEL: constrained_vector_rint_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 2, 1 ; PC64LE9-NEXT: xsrdpic 3, 3 ; PC64LE9-NEXT: xvrdpic 2, 0 @@ -4599,7 +4479,6 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl nearbyint ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -4624,7 +4503,6 @@ define <2 x double> @constrained_vector_nearbyint_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl nearbyint ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -4747,7 +4625,6 @@ define <3 x double> @constrained_vector_nearby_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl nearbyint ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl nearbyint @@ -4780,7 +4657,6 @@ define <3 x double> @constrained_vector_nearby_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl nearbyint ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl nearbyint @@ -4824,7 +4700,6 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl nearbyint ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: bl nearbyint @@ -4835,7 +4710,6 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -4864,7 +4738,6 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl nearbyint ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl nearbyint @@ -4873,7 +4746,6 @@ define <4 x double> @constrained_vector_nearbyint_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl nearbyint ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -5055,10 +4927,6 @@ define <3 x double> 
@constrained_vector_max_v3f64(<3 x double> %x, <3 x double> ; PC64LE-NEXT: mflr 0 ; PC64LE-NEXT: stdu 1, -64(1) ; PC64LE-NEXT: li 3, 48 -; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 5, 4 ; PC64LE-NEXT: xxmrghd 1, 2, 1 ; PC64LE-NEXT: std 0, 80(1) @@ -5082,10 +4950,6 @@ define <3 x double> @constrained_vector_max_v3f64(<3 x double> %x, <3 x double> ; PC64LE9: # %bb.0: # %entry ; PC64LE9-NEXT: mflr 0 ; PC64LE9-NEXT: stdu 1, -48(1) -; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 5, 4 ; PC64LE9-NEXT: xxmrghd 1, 2, 1 ; PC64LE9-NEXT: std 0, 64(1) @@ -5295,10 +5159,6 @@ define <3 x double> @constrained_vector_min_v3f64(<3 x double> %x, <3 x double> ; PC64LE-NEXT: mflr 0 ; PC64LE-NEXT: stdu 1, -64(1) ; PC64LE-NEXT: li 3, 48 -; PC64LE-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 5, 4 ; PC64LE-NEXT: xxmrghd 1, 2, 1 ; PC64LE-NEXT: std 0, 80(1) @@ -5322,10 +5182,6 @@ define <3 x double> @constrained_vector_min_v3f64(<3 x double> %x, <3 x double> ; PC64LE9: # %bb.0: # %entry ; PC64LE9-NEXT: mflr 0 ; PC64LE9-NEXT: stdu 1, -48(1) -; PC64LE9-NEXT: # kill: def $f5 killed $f5 def $vsl5 -; PC64LE9-NEXT: # kill: def $f4 killed $f4 def $vsl4 -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 5, 4 ; PC64LE9-NEXT: xxmrghd 1, 2, 1 ; PC64LE9-NEXT: std 0, 64(1) @@ -6664,8 +6520,6 @@ entry: define <3 x double> @constrained_vector_ceil_v3f64(<3 x double> %x) #0 { ; PC64LE-LABEL: constrained_vector_ceil_v3f64: ; PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 2, 1 ; PC64LE-NEXT: xsrdpip 3, 3 ; PC64LE-NEXT: xvrdpip 2, 0 @@ -6674,8 +6528,6 @@ define <3 x double> @constrained_vector_ceil_v3f64(<3 x double> %x) #0 { ; ; PC64LE9-LABEL: constrained_vector_ceil_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 2, 1 ; PC64LE9-NEXT: xsrdpip 3, 3 ; PC64LE9-NEXT: xvrdpip 2, 0 @@ -6776,8 +6628,6 @@ entry: define <3 x double> @constrained_vector_floor_v3f64(<3 x double> %x) #0 { ; PC64LE-LABEL: constrained_vector_floor_v3f64: ; PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 2, 1 ; PC64LE-NEXT: xsrdpim 3, 3 ; PC64LE-NEXT: xvrdpim 2, 0 @@ -6786,8 +6636,6 @@ define <3 x double> @constrained_vector_floor_v3f64(<3 x double> %x) #0 { ; ; PC64LE9-LABEL: constrained_vector_floor_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 2, 1 ; PC64LE9-NEXT: xsrdpim 3, 3 ; PC64LE9-NEXT: xvrdpim 2, 0 @@ -6888,8 +6736,6 @@ entry: define <3 x double> @constrained_vector_round_v3f64(<3 x double> %x) #0 { ; PC64LE-LABEL: constrained_vector_round_v3f64: ; 
PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 2, 1 ; PC64LE-NEXT: xsrdpi 3, 3 ; PC64LE-NEXT: xvrdpi 2, 0 @@ -6898,8 +6744,6 @@ define <3 x double> @constrained_vector_round_v3f64(<3 x double> %x) #0 { ; ; PC64LE9-LABEL: constrained_vector_round_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 2, 1 ; PC64LE9-NEXT: xsrdpi 3, 3 ; PC64LE9-NEXT: xvrdpi 2, 0 @@ -6999,8 +6843,6 @@ entry: define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 { ; PC64LE-LABEL: constrained_vector_trunc_v3f64: ; PC64LE: # %bb.0: # %entry -; PC64LE-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 0, 2, 1 ; PC64LE-NEXT: xsrdpiz 3, 3 ; PC64LE-NEXT: xvrdpiz 2, 0 @@ -7009,8 +6851,6 @@ define <3 x double> @constrained_vector_trunc_v3f64(<3 x double> %x) #0 { ; ; PC64LE9-LABEL: constrained_vector_trunc_v3f64: ; PC64LE9: # %bb.0: # %entry -; PC64LE9-NEXT: # kill: def $f2 killed $f2 def $vsl2 -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 0, 2, 1 ; PC64LE9-NEXT: xsrdpiz 3, 3 ; PC64LE9-NEXT: xvrdpiz 2, 0 @@ -8209,7 +8049,6 @@ define <2 x double> @constrained_vector_tan_v2f64(<2 x double> %x) #0 { ; PC64LE-NEXT: bl tan ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 64 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 62, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 48 @@ -8234,7 +8073,6 @@ define <2 x double> @constrained_vector_tan_v2f64(<2 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl tan ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 62, 1 ; PC64LE9-NEXT: lxv 63, 48(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 32(1) # 16-byte Folded Reload @@ -8357,7 +8195,6 @@ define <3 x double> @constrained_vector_tan_v3f64(<3 x double> %x) #0 { ; PC64LE-NEXT: fmr 1, 30 ; PC64LE-NEXT: bl tan ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 31 ; PC64LE-NEXT: bl tan @@ -8390,7 +8227,6 @@ define <3 x double> @constrained_vector_tan_v3f64(<3 x double> %x) #0 { ; PC64LE9-NEXT: fmr 1, 30 ; PC64LE9-NEXT: bl tan ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 31 ; PC64LE9-NEXT: bl tan @@ -8434,7 +8270,6 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: xxswapd 1, 62 ; PC64LE-NEXT: bl tan ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 61, 1 ; PC64LE-NEXT: xxlor 1, 63, 63 ; PC64LE-NEXT: bl tan @@ -8445,7 +8280,6 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 { ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -8474,7 +8308,6 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 62 ; PC64LE9-NEXT: bl tan ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 61, 1 ; PC64LE9-NEXT: xscpsgndp 1, 63, 63 ; PC64LE9-NEXT: bl 
tan @@ -8483,7 +8316,6 @@ define <4 x double> @constrained_vector_tan_v4f64(<4 x double> %x) #0 { ; PC64LE9-NEXT: xxswapd 1, 63 ; PC64LE9-NEXT: bl tan ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 61, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload @@ -8558,7 +8390,6 @@ define <2 x double> @constrained_vector_atan2_v2f64(<2 x double> %x, <2 x double ; PC64LE-NEXT: bl atan2 ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 80 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 34, 61, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 64 @@ -8589,7 +8420,6 @@ define <2 x double> @constrained_vector_atan2_v2f64(<2 x double> %x, <2 x double ; PC64LE9-NEXT: xxswapd 2, 63 ; PC64LE9-NEXT: bl atan2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 34, 61, 1 ; PC64LE9-NEXT: lxv 63, 64(1) # 16-byte Folded Reload ; PC64LE9-NEXT: lxv 62, 48(1) # 16-byte Folded Reload @@ -8741,7 +8571,6 @@ define <3 x double> @constrained_vector_atan2_v3f64(<3 x double> %x, <3 x double ; PC64LE-NEXT: fmr 2, 30 ; PC64LE-NEXT: bl atan2 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 63, 1, 63 ; PC64LE-NEXT: fmr 1, 29 ; PC64LE-NEXT: fmr 2, 31 @@ -8783,7 +8612,6 @@ define <3 x double> @constrained_vector_atan2_v3f64(<3 x double> %x, <3 x double ; PC64LE9-NEXT: fmr 2, 30 ; PC64LE9-NEXT: bl atan2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 63, 1, 63 ; PC64LE9-NEXT: fmr 1, 29 ; PC64LE9-NEXT: fmr 2, 31 @@ -8839,7 +8667,6 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double ; PC64LE-NEXT: xxswapd 2, 62 ; PC64LE-NEXT: bl atan2 ; PC64LE-NEXT: nop -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 62, 59, 1 ; PC64LE-NEXT: xxlor 1, 61, 61 ; PC64LE-NEXT: xxlor 2, 63, 63 @@ -8852,7 +8679,6 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double ; PC64LE-NEXT: nop ; PC64LE-NEXT: li 3, 112 ; PC64LE-NEXT: vmr 2, 30 -; PC64LE-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE-NEXT: xxmrghd 35, 60, 1 ; PC64LE-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload ; PC64LE-NEXT: li 3, 96 @@ -8891,7 +8717,6 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double ; PC64LE9-NEXT: xxswapd 2, 62 ; PC64LE9-NEXT: bl atan2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 62, 59, 1 ; PC64LE9-NEXT: xscpsgndp 1, 61, 61 ; PC64LE9-NEXT: xscpsgndp 2, 63, 63 @@ -8902,7 +8727,6 @@ define <4 x double> @constrained_vector_atan2_v4f64(<4 x double> %x, <4 x double ; PC64LE9-NEXT: xxswapd 2, 63 ; PC64LE9-NEXT: bl atan2 ; PC64LE9-NEXT: nop -; PC64LE9-NEXT: # kill: def $f1 killed $f1 def $vsl1 ; PC64LE9-NEXT: xxmrghd 35, 60, 1 ; PC64LE9-NEXT: vmr 2, 30 ; PC64LE9-NEXT: lxv 63, 96(1) # 16-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll index 28a95ef..f11a9c8 100644 --- a/llvm/test/CodeGen/RISCV/abds.ll +++ b/llvm/test/CodeGen/RISCV/abds.ll @@ -2011,50 +2011,50 @@ define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind { define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_subnsw_i128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a4, 0(a2) -; RV32I-NEXT: lw a3, 4(a2) +; RV32I-NEXT: lw a3, 0(a2) +; RV32I-NEXT: lw a4, 4(a2) ; RV32I-NEXT: lw a5, 8(a2) -; RV32I-NEXT: lw a6, 
12(a2) +; RV32I-NEXT: lw a2, 12(a2) ; RV32I-NEXT: lw a7, 8(a1) ; RV32I-NEXT: lw t0, 12(a1) -; RV32I-NEXT: lw a2, 0(a1) +; RV32I-NEXT: lw a6, 0(a1) ; RV32I-NEXT: lw a1, 4(a1) ; RV32I-NEXT: sltu t1, a7, a5 -; RV32I-NEXT: sub t0, t0, a6 -; RV32I-NEXT: sltu a6, a2, a4 +; RV32I-NEXT: sub t0, t0, a2 +; RV32I-NEXT: sltu a2, a6, a3 ; RV32I-NEXT: sub t0, t0, t1 -; RV32I-NEXT: mv t1, a6 -; RV32I-NEXT: beq a1, a3, .LBB31_2 +; RV32I-NEXT: mv t1, a2 +; RV32I-NEXT: beq a1, a4, .LBB31_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t1, a1, a3 +; RV32I-NEXT: sltu t1, a1, a4 ; RV32I-NEXT: .LBB31_2: ; RV32I-NEXT: sub a5, a7, a5 -; RV32I-NEXT: sub a3, a1, a3 -; RV32I-NEXT: sltu a1, a5, t1 +; RV32I-NEXT: sub a1, a1, a4 +; RV32I-NEXT: sltu a4, a5, t1 ; RV32I-NEXT: sub a5, a5, t1 -; RV32I-NEXT: sub a1, t0, a1 -; RV32I-NEXT: sub a3, a3, a6 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: bgez a1, .LBB31_4 +; RV32I-NEXT: sub a4, t0, a4 +; RV32I-NEXT: sub a2, a1, a2 +; RV32I-NEXT: sub a1, a6, a3 +; RV32I-NEXT: bgez a4, .LBB31_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a4, a3 -; RV32I-NEXT: snez a6, a2 +; RV32I-NEXT: snez a3, a2 +; RV32I-NEXT: snez a6, a1 ; RV32I-NEXT: neg a7, a5 ; RV32I-NEXT: snez a5, a5 +; RV32I-NEXT: or a3, a6, a3 +; RV32I-NEXT: add a4, a4, a5 +; RV32I-NEXT: add a2, a2, a6 +; RV32I-NEXT: sltu a6, a7, a3 +; RV32I-NEXT: neg a4, a4 +; RV32I-NEXT: sub a5, a7, a3 ; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: or a4, a6, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: add a3, a3, a6 -; RV32I-NEXT: sltu a6, a7, a4 +; RV32I-NEXT: sub a4, a4, a6 ; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: sub a5, a7, a4 -; RV32I-NEXT: sub a1, a1, a6 -; RV32I-NEXT: neg a3, a3 ; RV32I-NEXT: .LBB31_4: -; RV32I-NEXT: sw a2, 0(a0) -; RV32I-NEXT: sw a3, 4(a0) +; RV32I-NEXT: sw a1, 0(a0) +; RV32I-NEXT: sw a2, 4(a0) ; RV32I-NEXT: sw a5, 8(a0) -; RV32I-NEXT: sw a1, 12(a0) +; RV32I-NEXT: sw a4, 12(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_subnsw_i128: @@ -2074,50 +2074,50 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind { ; ; RV32ZBB-LABEL: abd_subnsw_i128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a4, 0(a2) -; RV32ZBB-NEXT: lw a3, 4(a2) +; RV32ZBB-NEXT: lw a3, 0(a2) +; RV32ZBB-NEXT: lw a4, 4(a2) ; RV32ZBB-NEXT: lw a5, 8(a2) -; RV32ZBB-NEXT: lw a6, 12(a2) +; RV32ZBB-NEXT: lw a2, 12(a2) ; RV32ZBB-NEXT: lw a7, 8(a1) ; RV32ZBB-NEXT: lw t0, 12(a1) -; RV32ZBB-NEXT: lw a2, 0(a1) +; RV32ZBB-NEXT: lw a6, 0(a1) ; RV32ZBB-NEXT: lw a1, 4(a1) ; RV32ZBB-NEXT: sltu t1, a7, a5 -; RV32ZBB-NEXT: sub t0, t0, a6 -; RV32ZBB-NEXT: sltu a6, a2, a4 +; RV32ZBB-NEXT: sub t0, t0, a2 +; RV32ZBB-NEXT: sltu a2, a6, a3 ; RV32ZBB-NEXT: sub t0, t0, t1 -; RV32ZBB-NEXT: mv t1, a6 -; RV32ZBB-NEXT: beq a1, a3, .LBB31_2 +; RV32ZBB-NEXT: mv t1, a2 +; RV32ZBB-NEXT: beq a1, a4, .LBB31_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t1, a1, a3 +; RV32ZBB-NEXT: sltu t1, a1, a4 ; RV32ZBB-NEXT: .LBB31_2: ; RV32ZBB-NEXT: sub a5, a7, a5 -; RV32ZBB-NEXT: sub a3, a1, a3 -; RV32ZBB-NEXT: sltu a1, a5, t1 +; RV32ZBB-NEXT: sub a1, a1, a4 +; RV32ZBB-NEXT: sltu a4, a5, t1 ; RV32ZBB-NEXT: sub a5, a5, t1 -; RV32ZBB-NEXT: sub a1, t0, a1 -; RV32ZBB-NEXT: sub a3, a3, a6 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: bgez a1, .LBB31_4 +; RV32ZBB-NEXT: sub a4, t0, a4 +; RV32ZBB-NEXT: sub a2, a1, a2 +; RV32ZBB-NEXT: sub a1, a6, a3 +; RV32ZBB-NEXT: bgez a4, .LBB31_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a4, a3 -; RV32ZBB-NEXT: snez a6, a2 +; RV32ZBB-NEXT: snez a3, a2 +; RV32ZBB-NEXT: snez a6, a1 ; RV32ZBB-NEXT: neg a7, a5 ; RV32ZBB-NEXT: snez a5, a5 +; RV32ZBB-NEXT: or a3, a6, a3 +; 
RV32ZBB-NEXT: add a4, a4, a5 +; RV32ZBB-NEXT: add a2, a2, a6 +; RV32ZBB-NEXT: sltu a6, a7, a3 +; RV32ZBB-NEXT: neg a4, a4 +; RV32ZBB-NEXT: sub a5, a7, a3 ; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: or a4, a6, a4 -; RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: add a3, a3, a6 -; RV32ZBB-NEXT: sltu a6, a7, a4 +; RV32ZBB-NEXT: sub a4, a4, a6 ; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: sub a5, a7, a4 -; RV32ZBB-NEXT: sub a1, a1, a6 -; RV32ZBB-NEXT: neg a3, a3 ; RV32ZBB-NEXT: .LBB31_4: -; RV32ZBB-NEXT: sw a2, 0(a0) -; RV32ZBB-NEXT: sw a3, 4(a0) +; RV32ZBB-NEXT: sw a1, 0(a0) +; RV32ZBB-NEXT: sw a2, 4(a0) ; RV32ZBB-NEXT: sw a5, 8(a0) -; RV32ZBB-NEXT: sw a1, 12(a0) +; RV32ZBB-NEXT: sw a4, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_subnsw_i128: @@ -2142,50 +2142,50 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind { define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_subnsw_i128_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a4, 0(a2) -; RV32I-NEXT: lw a3, 4(a2) +; RV32I-NEXT: lw a3, 0(a2) +; RV32I-NEXT: lw a4, 4(a2) ; RV32I-NEXT: lw a5, 8(a2) -; RV32I-NEXT: lw a6, 12(a2) +; RV32I-NEXT: lw a2, 12(a2) ; RV32I-NEXT: lw a7, 8(a1) ; RV32I-NEXT: lw t0, 12(a1) -; RV32I-NEXT: lw a2, 0(a1) +; RV32I-NEXT: lw a6, 0(a1) ; RV32I-NEXT: lw a1, 4(a1) ; RV32I-NEXT: sltu t1, a7, a5 -; RV32I-NEXT: sub t0, t0, a6 -; RV32I-NEXT: sltu a6, a2, a4 +; RV32I-NEXT: sub t0, t0, a2 +; RV32I-NEXT: sltu a2, a6, a3 ; RV32I-NEXT: sub t0, t0, t1 -; RV32I-NEXT: mv t1, a6 -; RV32I-NEXT: beq a1, a3, .LBB32_2 +; RV32I-NEXT: mv t1, a2 +; RV32I-NEXT: beq a1, a4, .LBB32_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t1, a1, a3 +; RV32I-NEXT: sltu t1, a1, a4 ; RV32I-NEXT: .LBB32_2: ; RV32I-NEXT: sub a5, a7, a5 -; RV32I-NEXT: sub a3, a1, a3 -; RV32I-NEXT: sltu a1, a5, t1 +; RV32I-NEXT: sub a1, a1, a4 +; RV32I-NEXT: sltu a4, a5, t1 ; RV32I-NEXT: sub a5, a5, t1 -; RV32I-NEXT: sub a1, t0, a1 -; RV32I-NEXT: sub a3, a3, a6 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: bgez a1, .LBB32_4 +; RV32I-NEXT: sub a4, t0, a4 +; RV32I-NEXT: sub a2, a1, a2 +; RV32I-NEXT: sub a1, a6, a3 +; RV32I-NEXT: bgez a4, .LBB32_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a4, a3 -; RV32I-NEXT: snez a6, a2 +; RV32I-NEXT: snez a3, a2 +; RV32I-NEXT: snez a6, a1 ; RV32I-NEXT: neg a7, a5 ; RV32I-NEXT: snez a5, a5 +; RV32I-NEXT: or a3, a6, a3 +; RV32I-NEXT: add a4, a4, a5 +; RV32I-NEXT: add a2, a2, a6 +; RV32I-NEXT: sltu a6, a7, a3 +; RV32I-NEXT: neg a4, a4 +; RV32I-NEXT: sub a5, a7, a3 ; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: or a4, a6, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: add a3, a3, a6 -; RV32I-NEXT: sltu a6, a7, a4 +; RV32I-NEXT: sub a4, a4, a6 ; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: sub a5, a7, a4 -; RV32I-NEXT: sub a1, a1, a6 -; RV32I-NEXT: neg a3, a3 ; RV32I-NEXT: .LBB32_4: -; RV32I-NEXT: sw a2, 0(a0) -; RV32I-NEXT: sw a3, 4(a0) +; RV32I-NEXT: sw a1, 0(a0) +; RV32I-NEXT: sw a2, 4(a0) ; RV32I-NEXT: sw a5, 8(a0) -; RV32I-NEXT: sw a1, 12(a0) +; RV32I-NEXT: sw a4, 12(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_subnsw_i128_undef: @@ -2205,50 +2205,50 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind { ; ; RV32ZBB-LABEL: abd_subnsw_i128_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a4, 0(a2) -; RV32ZBB-NEXT: lw a3, 4(a2) +; RV32ZBB-NEXT: lw a3, 0(a2) +; RV32ZBB-NEXT: lw a4, 4(a2) ; RV32ZBB-NEXT: lw a5, 8(a2) -; RV32ZBB-NEXT: lw a6, 12(a2) +; RV32ZBB-NEXT: lw a2, 12(a2) ; RV32ZBB-NEXT: lw a7, 8(a1) ; RV32ZBB-NEXT: lw t0, 12(a1) -; RV32ZBB-NEXT: lw a2, 0(a1) +; RV32ZBB-NEXT: lw a6, 0(a1) ; RV32ZBB-NEXT: lw a1, 
4(a1) ; RV32ZBB-NEXT: sltu t1, a7, a5 -; RV32ZBB-NEXT: sub t0, t0, a6 -; RV32ZBB-NEXT: sltu a6, a2, a4 +; RV32ZBB-NEXT: sub t0, t0, a2 +; RV32ZBB-NEXT: sltu a2, a6, a3 ; RV32ZBB-NEXT: sub t0, t0, t1 -; RV32ZBB-NEXT: mv t1, a6 -; RV32ZBB-NEXT: beq a1, a3, .LBB32_2 +; RV32ZBB-NEXT: mv t1, a2 +; RV32ZBB-NEXT: beq a1, a4, .LBB32_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t1, a1, a3 +; RV32ZBB-NEXT: sltu t1, a1, a4 ; RV32ZBB-NEXT: .LBB32_2: ; RV32ZBB-NEXT: sub a5, a7, a5 -; RV32ZBB-NEXT: sub a3, a1, a3 -; RV32ZBB-NEXT: sltu a1, a5, t1 +; RV32ZBB-NEXT: sub a1, a1, a4 +; RV32ZBB-NEXT: sltu a4, a5, t1 ; RV32ZBB-NEXT: sub a5, a5, t1 -; RV32ZBB-NEXT: sub a1, t0, a1 -; RV32ZBB-NEXT: sub a3, a3, a6 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: bgez a1, .LBB32_4 +; RV32ZBB-NEXT: sub a4, t0, a4 +; RV32ZBB-NEXT: sub a2, a1, a2 +; RV32ZBB-NEXT: sub a1, a6, a3 +; RV32ZBB-NEXT: bgez a4, .LBB32_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a4, a3 -; RV32ZBB-NEXT: snez a6, a2 +; RV32ZBB-NEXT: snez a3, a2 +; RV32ZBB-NEXT: snez a6, a1 ; RV32ZBB-NEXT: neg a7, a5 ; RV32ZBB-NEXT: snez a5, a5 +; RV32ZBB-NEXT: or a3, a6, a3 +; RV32ZBB-NEXT: add a4, a4, a5 +; RV32ZBB-NEXT: add a2, a2, a6 +; RV32ZBB-NEXT: sltu a6, a7, a3 +; RV32ZBB-NEXT: neg a4, a4 +; RV32ZBB-NEXT: sub a5, a7, a3 ; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: or a4, a6, a4 -; RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: add a3, a3, a6 -; RV32ZBB-NEXT: sltu a6, a7, a4 +; RV32ZBB-NEXT: sub a4, a4, a6 ; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: sub a5, a7, a4 -; RV32ZBB-NEXT: sub a1, a1, a6 -; RV32ZBB-NEXT: neg a3, a3 ; RV32ZBB-NEXT: .LBB32_4: -; RV32ZBB-NEXT: sw a2, 0(a0) -; RV32ZBB-NEXT: sw a3, 4(a0) +; RV32ZBB-NEXT: sw a1, 0(a0) +; RV32ZBB-NEXT: sw a2, 4(a0) ; RV32ZBB-NEXT: sw a5, 8(a0) -; RV32ZBB-NEXT: sw a1, 12(a0) +; RV32ZBB-NEXT: sw a4, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_subnsw_i128_undef: diff --git a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll new file mode 100644 index 0000000..be3de37 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll @@ -0,0 +1,14 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=riscv64 | FileCheck %s + +define i1 @src(i64 %x) { +; CHECK-LABEL: src: +; CHECK: # %bb.0: +; CHECK-NEXT: srai a0, a0, 30 +; CHECK-NEXT: addi a0, a0, 2 +; CHECK-NEXT: seqz a0, a0 +; CHECK-NEXT: ret + %a = and i64 %x, -1073741824 + %b = icmp eq i64 %a, -2147483648 + ret i1 %b +} diff --git a/llvm/test/CodeGen/RISCV/combine-storetomstore.ll b/llvm/test/CodeGen/RISCV/combine-storetomstore.ll new file mode 100644 index 0000000..c7d1f76 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/combine-storetomstore.ll @@ -0,0 +1,684 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=riscv64-- -mattr=+m,+v,+f | FileCheck %s -check-prefix=RISCV + +define void @test_masked_store_success_v4i8(<4 x i8> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v4i8: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RISCV-NEXT: vse8.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <4 x i8>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i8> %x, <4 x i8> %load + store <4 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i16(<4 x i16> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v4i16: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli 
zero, 4, e16, mf2, ta, ma +; RISCV-NEXT: vse16.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <4 x i16>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i16> %x, <4 x i16> %load + store <4 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i32(<4 x i32> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v4i32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <4 x i32>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %load + store <4 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i64(<4 x i64> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v4i64: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RISCV-NEXT: vse64.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <4 x i64>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %load + store <4 x i64> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f16(<4 x half> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v4f16: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RISCV-NEXT: vmv1r.v v9, v0 +; RISCV-NEXT: vfirst.m a3, v0 +; RISCV-NEXT: mv a2, a0 +; RISCV-NEXT: beqz a3, .LBB4_2 +; RISCV-NEXT: # %bb.1: +; RISCV-NEXT: mv a2, a1 +; RISCV-NEXT: .LBB4_2: +; RISCV-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RISCV-NEXT: vmv.v.i v8, 0 +; RISCV-NEXT: vmv1r.v v0, v9 +; RISCV-NEXT: vmerge.vim v8, v8, 1, v0 +; RISCV-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RISCV-NEXT: vslidedown.vi v8, v8, 2 +; RISCV-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RISCV-NEXT: vmsne.vi v8, v8, 0 +; RISCV-NEXT: vmv.v.i v10, 0 +; RISCV-NEXT: vmv1r.v v0, v8 +; RISCV-NEXT: vmerge.vim v11, v10, 1, v0 +; RISCV-NEXT: vslidedown.vi v11, v11, 1 +; RISCV-NEXT: vmv.x.s a3, v11 +; RISCV-NEXT: andi a3, a3, 1 +; RISCV-NEXT: bnez a3, .LBB4_4 +; RISCV-NEXT: # %bb.3: +; RISCV-NEXT: addi a3, a1, 6 +; RISCV-NEXT: j .LBB4_5 +; RISCV-NEXT: .LBB4_4: +; RISCV-NEXT: addi a3, a0, 24 +; RISCV-NEXT: .LBB4_5: +; RISCV-NEXT: vmv1r.v v0, v9 +; RISCV-NEXT: vmerge.vim v9, v10, 1, v0 +; RISCV-NEXT: vslidedown.vi v9, v9, 1 +; RISCV-NEXT: vmv.x.s a4, v9 +; RISCV-NEXT: andi a4, a4, 1 +; RISCV-NEXT: bnez a4, .LBB4_7 +; RISCV-NEXT: # %bb.6: +; RISCV-NEXT: addi a5, a1, 2 +; RISCV-NEXT: j .LBB4_8 +; RISCV-NEXT: .LBB4_7: +; RISCV-NEXT: addi a5, a0, 8 +; RISCV-NEXT: .LBB4_8: +; RISCV-NEXT: lh a4, 0(a2) +; RISCV-NEXT: lh a2, 0(a3) +; RISCV-NEXT: lh a3, 0(a5) +; RISCV-NEXT: vfirst.m a5, v8 +; RISCV-NEXT: beqz a5, .LBB4_10 +; RISCV-NEXT: # %bb.9: +; RISCV-NEXT: addi a0, a1, 4 +; RISCV-NEXT: j .LBB4_11 +; RISCV-NEXT: .LBB4_10: +; RISCV-NEXT: addi a0, a0, 16 +; RISCV-NEXT: .LBB4_11: +; RISCV-NEXT: lh a0, 0(a0) +; RISCV-NEXT: sh a4, 0(a1) +; RISCV-NEXT: sh a3, 2(a1) +; RISCV-NEXT: sh a0, 4(a1) +; RISCV-NEXT: sh a2, 6(a1) +; RISCV-NEXT: ret + %load = load <4 x half>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x half> %x, <4 x half> %load + store <4 x half> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f32(<4 x float> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v4f32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <4 x float>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, 
<4 x float> %x, <4 x float> %load + store <4 x float> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f64(<4 x double> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v4f64: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RISCV-NEXT: vse64.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <4 x double>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x double> %x, <4 x double> %load + store <4 x double> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i8(<8 x i8> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v8i8: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RISCV-NEXT: vse8.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <8 x i8>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i8> %x, <8 x i8> %load + store <8 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i16(<8 x i16> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v8i16: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RISCV-NEXT: vse16.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <8 x i16>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %load + store <8 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i32(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v8i32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i64(<8 x i64> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v8i64: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RISCV-NEXT: vse64.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <8 x i64>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %load + store <8 x i64> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f16(<8 x half> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v8f16: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RISCV-NEXT: vmv1r.v v8, v0 +; RISCV-NEXT: vfirst.m a3, v0 +; RISCV-NEXT: mv a2, a0 +; RISCV-NEXT: beqz a3, .LBB11_2 +; RISCV-NEXT: # %bb.1: +; RISCV-NEXT: mv a2, a1 +; RISCV-NEXT: .LBB11_2: +; RISCV-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RISCV-NEXT: vmv.v.i v9, 0 +; RISCV-NEXT: vmv1r.v v0, v8 +; RISCV-NEXT: vmerge.vim v9, v9, 1, v0 +; RISCV-NEXT: vsetivli zero, 4, e8, mf2, ta, ma +; RISCV-NEXT: vslidedown.vi v9, v9, 4 +; RISCV-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RISCV-NEXT: vmsne.vi v11, v9, 0 +; RISCV-NEXT: vmv.v.i v10, 0 +; RISCV-NEXT: vmv1r.v v0, v11 +; RISCV-NEXT: vmerge.vim v9, v10, 1, v0 +; RISCV-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RISCV-NEXT: vslidedown.vi v9, v9, 2 +; RISCV-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RISCV-NEXT: vmsne.vi v9, v9, 0 +; RISCV-NEXT: vmv.v.i v12, 0 +; RISCV-NEXT: vmv1r.v v0, v9 +; RISCV-NEXT: vmerge.vim v13, v12, 1, v0 +; RISCV-NEXT: vslidedown.vi v13, v13, 1 +; RISCV-NEXT: vmv.x.s a3, v13 +; RISCV-NEXT: andi a3, a3, 1 +; RISCV-NEXT: bnez a3, .LBB11_4 +; RISCV-NEXT: # %bb.3: +; RISCV-NEXT: addi a3, a1, 14 +; RISCV-NEXT: j .LBB11_5 +; 
RISCV-NEXT: .LBB11_4: +; RISCV-NEXT: addi a3, a0, 56 +; RISCV-NEXT: .LBB11_5: +; RISCV-NEXT: vmv1r.v v0, v8 +; RISCV-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RISCV-NEXT: vmerge.vim v10, v10, 1, v0 +; RISCV-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RISCV-NEXT: vslidedown.vi v10, v10, 2 +; RISCV-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RISCV-NEXT: vmsne.vi v10, v10, 0 +; RISCV-NEXT: vmv1r.v v0, v10 +; RISCV-NEXT: vmerge.vim v13, v12, 1, v0 +; RISCV-NEXT: vslidedown.vi v13, v13, 1 +; RISCV-NEXT: vmv.x.s a4, v13 +; RISCV-NEXT: andi a4, a4, 1 +; RISCV-NEXT: bnez a4, .LBB11_8 +; RISCV-NEXT: # %bb.6: +; RISCV-NEXT: addi a4, a1, 6 +; RISCV-NEXT: vfirst.m a5, v11 +; RISCV-NEXT: bnez a5, .LBB11_9 +; RISCV-NEXT: .LBB11_7: +; RISCV-NEXT: addi a5, a0, 32 +; RISCV-NEXT: j .LBB11_10 +; RISCV-NEXT: .LBB11_8: +; RISCV-NEXT: addi a4, a0, 24 +; RISCV-NEXT: vfirst.m a5, v11 +; RISCV-NEXT: beqz a5, .LBB11_7 +; RISCV-NEXT: .LBB11_9: +; RISCV-NEXT: addi a5, a1, 8 +; RISCV-NEXT: .LBB11_10: +; RISCV-NEXT: vmv1r.v v0, v11 +; RISCV-NEXT: vmerge.vim v11, v12, 1, v0 +; RISCV-NEXT: vslidedown.vi v11, v11, 1 +; RISCV-NEXT: vmv.x.s a6, v11 +; RISCV-NEXT: andi a6, a6, 1 +; RISCV-NEXT: bnez a6, .LBB11_14 +; RISCV-NEXT: # %bb.11: +; RISCV-NEXT: addi a6, a1, 10 +; RISCV-NEXT: vfirst.m a7, v9 +; RISCV-NEXT: bnez a7, .LBB11_15 +; RISCV-NEXT: .LBB11_12: +; RISCV-NEXT: addi a7, a0, 48 +; RISCV-NEXT: vfirst.m t0, v10 +; RISCV-NEXT: bnez t0, .LBB11_16 +; RISCV-NEXT: .LBB11_13: +; RISCV-NEXT: addi t1, a0, 16 +; RISCV-NEXT: j .LBB11_17 +; RISCV-NEXT: .LBB11_14: +; RISCV-NEXT: addi a6, a0, 40 +; RISCV-NEXT: vfirst.m a7, v9 +; RISCV-NEXT: beqz a7, .LBB11_12 +; RISCV-NEXT: .LBB11_15: +; RISCV-NEXT: addi a7, a1, 12 +; RISCV-NEXT: vfirst.m t0, v10 +; RISCV-NEXT: beqz t0, .LBB11_13 +; RISCV-NEXT: .LBB11_16: +; RISCV-NEXT: addi t1, a1, 4 +; RISCV-NEXT: .LBB11_17: +; RISCV-NEXT: vmv1r.v v0, v8 +; RISCV-NEXT: lh t0, 0(a2) +; RISCV-NEXT: lh a2, 0(a3) +; RISCV-NEXT: lh a3, 0(a4) +; RISCV-NEXT: lh a4, 0(a5) +; RISCV-NEXT: lh a5, 0(a6) +; RISCV-NEXT: lh a6, 0(a7) +; RISCV-NEXT: lh a7, 0(t1) +; RISCV-NEXT: vmerge.vim v8, v12, 1, v0 +; RISCV-NEXT: vslidedown.vi v8, v8, 1 +; RISCV-NEXT: vmv.x.s t1, v8 +; RISCV-NEXT: andi t1, t1, 1 +; RISCV-NEXT: bnez t1, .LBB11_19 +; RISCV-NEXT: # %bb.18: +; RISCV-NEXT: addi a0, a1, 2 +; RISCV-NEXT: j .LBB11_20 +; RISCV-NEXT: .LBB11_19: +; RISCV-NEXT: addi a0, a0, 8 +; RISCV-NEXT: .LBB11_20: +; RISCV-NEXT: lh a0, 0(a0) +; RISCV-NEXT: sh t0, 0(a1) +; RISCV-NEXT: sh a0, 2(a1) +; RISCV-NEXT: sh a7, 4(a1) +; RISCV-NEXT: sh a3, 6(a1) +; RISCV-NEXT: sh a4, 8(a1) +; RISCV-NEXT: sh a5, 10(a1) +; RISCV-NEXT: sh a6, 12(a1) +; RISCV-NEXT: sh a2, 14(a1) +; RISCV-NEXT: ret + %load = load <8 x half>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x half> %x, <8 x half> %load + store <8 x half> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f32(<8 x float> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v8f32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <8 x float>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x float> %x, <8 x float> %load + store <8 x float> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f64(<8 x double> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v8f64: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RISCV-NEXT: vse64.v v8, (a0), v0.t +; 
RISCV-NEXT: ret + %load = load <8 x double>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x double> %x, <8 x double> %load + store <8 x double> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i8(<16 x i8> %x, ptr %ptr, <16 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v16i8: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RISCV-NEXT: vse8.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <16 x i8>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %load + store <16 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i16(<16 x i16> %x, ptr %ptr, <16 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v16i16: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; RISCV-NEXT: vse16.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <16 x i16>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %load + store <16 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i32(<16 x i32> %x, ptr %ptr, <16 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v16i32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <16 x i32>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %load + store <16 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v32i8(<32 x i8> %x, ptr %ptr, <32 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v32i8: +; RISCV: # %bb.0: +; RISCV-NEXT: li a1, 32 +; RISCV-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; RISCV-NEXT: vse8.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <32 x i8>, ptr %ptr, align 32 + %sel = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %load + store <32 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v32i16(<32 x i16> %x, ptr %ptr, <32 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v32i16: +; RISCV: # %bb.0: +; RISCV-NEXT: li a1, 32 +; RISCV-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; RISCV-NEXT: vse16.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <32 x i16>, ptr %ptr, align 32 + %sel = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %load + store <32 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v64i8(<64 x i8> %x, ptr %ptr, <64 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_v64i8: +; RISCV: # %bb.0: +; RISCV-NEXT: li a1, 64 +; RISCV-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; RISCV-NEXT: vse8.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <64 x i8>, ptr %ptr, align 32 + %sel = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %load + store <64 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v4i32(<4 x i32> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_invert_mask_v4i32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RISCV-NEXT: vmnot.m v0, v0 +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <4 x i32>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> %x + store <4 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v8i32(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_invert_mask_v8i32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli 
zero, 8, e8, mf2, ta, ma +; RISCV-NEXT: vmnot.m v0, v0 +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %load, <8 x i32> %x + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v16i32(<16 x i32> %x, ptr %ptr, <16 x i1> %mask) { +; RISCV-LABEL: test_masked_store_success_invert_mask_v16i32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RISCV-NEXT: vmnot.m v0, v0 +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: ret + %load = load <16 x i32>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i32> %load, <16 x i32> %x + store <16 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_zextload(<4 x i64> %x, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_zextload: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RISCV-NEXT: vle32.v v12, (a0) +; RISCV-NEXT: vzext.vf2 v10, v12 +; RISCV-NEXT: vmerge.vvm v8, v10, v8, v0 +; RISCV-NEXT: vse64.v v8, (a0) +; RISCV-NEXT: ret + %load = load <4 x i32>, ptr %ptr, align 32 + %zext = zext <4 x i32> %load to <4 x i64> + %masked = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %zext + store <4 x i64> %masked, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_volatile_load(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_volatile_load: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RISCV-NEXT: vle32.v v10, (a0) +; RISCV-NEXT: vmerge.vvm v8, v10, v8, v0 +; RISCV-NEXT: vse32.v v8, (a0) +; RISCV-NEXT: ret + %load = load volatile <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_volatile_store(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_volatile_store: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RISCV-NEXT: vle32.v v10, (a0) +; RISCV-NEXT: vmerge.vvm v8, v10, v8, v0 +; RISCV-NEXT: vse32.v v8, (a0) +; RISCV-NEXT: ret + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store volatile <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +declare void @use_vec(<8 x i32>) + +define void @test_masked_store_intervening(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) nounwind { +; RISCV-LABEL: test_masked_store_intervening: +; RISCV: # %bb.0: +; RISCV-NEXT: addi sp, sp, -32 +; RISCV-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RISCV-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RISCV-NEXT: csrr a1, vlenb +; RISCV-NEXT: slli a2, a1, 2 +; RISCV-NEXT: add a1, a2, a1 +; RISCV-NEXT: sub sp, sp, a1 +; RISCV-NEXT: csrr a1, vlenb +; RISCV-NEXT: slli a1, a1, 2 +; RISCV-NEXT: add a1, sp, a1 +; RISCV-NEXT: addi a1, a1, 16 +; RISCV-NEXT: vs1r.v v0, (a1) # vscale x 8-byte Folded Spill +; RISCV-NEXT: mv s0, a0 +; RISCV-NEXT: csrr a1, vlenb +; RISCV-NEXT: slli a1, a1, 1 +; RISCV-NEXT: add a1, sp, a1 +; RISCV-NEXT: addi a1, a1, 16 +; RISCV-NEXT: vs2r.v v8, (a1) # vscale x 16-byte Folded Spill +; RISCV-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RISCV-NEXT: vle32.v v8, (a0) +; RISCV-NEXT: addi a1, sp, 16 +; RISCV-NEXT: vs2r.v v8, (a1) # vscale x 16-byte Folded Spill +; RISCV-NEXT: vmv.v.i v8, 0 +; RISCV-NEXT: vse32.v v8, (a0) +; RISCV-NEXT: call use_vec +; RISCV-NEXT: csrr a0, vlenb +; RISCV-NEXT: slli a0, a0, 2 +; RISCV-NEXT: add a0, sp, a0 +; 
RISCV-NEXT: addi a0, a0, 16 +; RISCV-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload +; RISCV-NEXT: csrr a0, vlenb +; RISCV-NEXT: slli a0, a0, 1 +; RISCV-NEXT: add a0, sp, a0 +; RISCV-NEXT: addi a0, a0, 16 +; RISCV-NEXT: vl2r.v v8, (a0) # vscale x 16-byte Folded Reload +; RISCV-NEXT: addi a0, sp, 16 +; RISCV-NEXT: vl2r.v v10, (a0) # vscale x 16-byte Folded Reload +; RISCV-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RISCV-NEXT: vmerge.vvm v8, v10, v8, v0 +; RISCV-NEXT: vse32.v v8, (s0) +; RISCV-NEXT: csrr a0, vlenb +; RISCV-NEXT: slli a1, a0, 2 +; RISCV-NEXT: add a0, a1, a0 +; RISCV-NEXT: add sp, sp, a0 +; RISCV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RISCV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RISCV-NEXT: addi sp, sp, 32 +; RISCV-NEXT: ret + %load = load <8 x i32>, ptr %ptr, align 32 + store <8 x i32> zeroinitializer, ptr %ptr, align 32 + %tmp = load <8 x i32>, ptr %ptr + call void @use_vec(<8 x i32> %tmp) + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + + +define void @test_masked_store_multiple_v8i32(<8 x i32> %x, <8 x i32> %y, ptr %ptr1, ptr %ptr2, <8 x i1> %mask, <8 x i1> %mask2) { +; RISCV-LABEL: test_masked_store_multiple_v8i32: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RISCV-NEXT: vmv1r.v v13, v0 +; RISCV-NEXT: vle32.v v14, (a1) +; RISCV-NEXT: vmv1r.v v0, v12 +; RISCV-NEXT: vmerge.vvm v10, v14, v10, v0 +; RISCV-NEXT: vmv1r.v v0, v13 +; RISCV-NEXT: vse32.v v8, (a0), v0.t +; RISCV-NEXT: vse32.v v10, (a1) +; RISCV-NEXT: ret + %load = load <8 x i32>, ptr %ptr1, align 32 + %load2 = load <8 x i32>, ptr %ptr2, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + %sel2 = select <8 x i1> %mask2, <8 x i32> %y, <8 x i32> %load2 + store <8 x i32> %sel, ptr %ptr1, align 32 + store <8 x i32> %sel2, ptr %ptr2, align 32 + ret void +} + +define void @test_masked_store_multiple_v8i64(<8 x i64> %x, <8 x i64> %y, ptr %ptr1, ptr %ptr2, <8 x i1> %mask, <8 x i1> %mask2) { +; RISCV-LABEL: test_masked_store_multiple_v8i64: +; RISCV: # %bb.0: +; RISCV-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RISCV-NEXT: vmv1r.v v17, v0 +; RISCV-NEXT: vle64.v v20, (a1) +; RISCV-NEXT: vmv1r.v v0, v16 +; RISCV-NEXT: vmerge.vvm v12, v20, v12, v0 +; RISCV-NEXT: vmv1r.v v0, v17 +; RISCV-NEXT: vse64.v v8, (a0), v0.t +; RISCV-NEXT: vse64.v v12, (a1) +; RISCV-NEXT: ret + %load = load <8 x i64>, ptr %ptr1, align 32 + %load2 = load <8 x i64>, ptr %ptr2, align 32 + %sel = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %load + %sel2 = select <8 x i1> %mask2, <8 x i64> %y, <8 x i64> %load2 + store <8 x i64> %sel, ptr %ptr1, align 32 + store <8 x i64> %sel2, ptr %ptr2, align 32 + ret void +} + +define void @test_masked_store_unaligned_v4i32(<4 x i32> %data, ptr %ptr, <4 x i1> %mask) { +; RISCV-LABEL: test_masked_store_unaligned_v4i32: +; RISCV: # %bb.0: +; RISCV-NEXT: addi a0, a0, 1 +; RISCV-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RISCV-NEXT: vle8.v v9, (a0) +; RISCV-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RISCV-NEXT: vmerge.vvm v8, v9, v8, v0 +; RISCV-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RISCV-NEXT: vse8.v v8, (a0) +; RISCV-NEXT: ret + %ptr_i8 = getelementptr i8, ptr %ptr, i32 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <4 x i32>, ptr %ptr_vec, align 1 + %sel = select <4 x i1> %mask, <4 x i32> %data, <4 x i32> %load + store <4 x i32> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v4i64(<4 x i64> %data, ptr %ptr, <4 x i1> %mask) { +; 
RISCV-LABEL: test_masked_store_unaligned_v4i64: +; RISCV: # %bb.0: +; RISCV-NEXT: addi a0, a0, 1 +; RISCV-NEXT: li a1, 32 +; RISCV-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; RISCV-NEXT: vle8.v v10, (a0) +; RISCV-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RISCV-NEXT: vmerge.vvm v8, v10, v8, v0 +; RISCV-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; RISCV-NEXT: vse8.v v8, (a0) +; RISCV-NEXT: ret + %ptr_i8 = getelementptr i8, ptr %ptr, i64 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <4 x i64>, ptr %ptr_vec, align 1 + %sel = select <4 x i1> %mask, <4 x i64> %data, <4 x i64> %load + store <4 x i64> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v8i32(<8 x i32> %data, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_unaligned_v8i32: +; RISCV: # %bb.0: +; RISCV-NEXT: addi a0, a0, 1 +; RISCV-NEXT: li a1, 32 +; RISCV-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; RISCV-NEXT: vle8.v v10, (a0) +; RISCV-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RISCV-NEXT: vmerge.vvm v8, v10, v8, v0 +; RISCV-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; RISCV-NEXT: vse8.v v8, (a0) +; RISCV-NEXT: ret + %ptr_i8 = getelementptr i8, ptr %ptr, i32 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <8 x i32>, ptr %ptr_vec, align 1 + %sel = select <8 x i1> %mask, <8 x i32> %data, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v8i64(<8 x i64> %data, ptr %ptr, <8 x i1> %mask) { +; RISCV-LABEL: test_masked_store_unaligned_v8i64: +; RISCV: # %bb.0: +; RISCV-NEXT: addi a0, a0, 1 +; RISCV-NEXT: li a1, 64 +; RISCV-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; RISCV-NEXT: vle8.v v12, (a0) +; RISCV-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RISCV-NEXT: vmerge.vvm v8, v12, v8, v0 +; RISCV-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; RISCV-NEXT: vse8.v v8, (a0) +; RISCV-NEXT: ret + %ptr_i8 = getelementptr i8, ptr %ptr, i64 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <8 x i64>, ptr %ptr_vec, align 1 + %sel = select <8 x i1> %mask, <8 x i64> %data, <8 x i64> %load + store <8 x i64> %sel, ptr %ptr_vec, align 1 + ret void +} diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll index b94665b..fb53921 100644 --- a/llvm/test/CodeGen/RISCV/features-info.ll +++ b/llvm/test/CodeGen/RISCV/features-info.ll @@ -6,13 +6,21 @@ ; CHECK-NEXT: 32bit - Implements RV32. ; CHECK-NEXT: 64bit - Implements RV64. ; CHECK-NEXT: a - 'A' (Atomic Instructions). +; CHECK-NEXT: add-load-fusion - Enable ADD(.UW) + load macrofusion. +; CHECK-NEXT: addi-load-fusion - Enable ADDI + load macrofusion. ; CHECK-NEXT: andes45 - Andes 45-Series processors. ; CHECK-NEXT: auipc-addi-fusion - Enable AUIPC+ADDI macrofusion. +; CHECK-NEXT: auipc-load-fusion - Enable AUIPC + load macrofusion. ; CHECK-NEXT: b - 'B' (the collection of the Zba, Zbb, Zbs extensions). +; CHECK-NEXT: bfext-fusion - Enable SLLI+SRLI (bitfield extract) macrofusion. ; CHECK-NEXT: c - 'C' (Compressed Instructions). ; CHECK-NEXT: conditional-cmv-fusion - Enable branch+c.mv fusion. ; CHECK-NEXT: d - 'D' (Double-Precision Floating-Point). ; CHECK-NEXT: disable-latency-sched-heuristic - Disable latency scheduling heuristic. +; CHECK-NEXT: disable-misched-load-clustering - Disable load clustering in the machine scheduler. +; CHECK-NEXT: disable-misched-store-clustering - Disable store clustering in the machine scheduler. +; CHECK-NEXT: disable-postmisched-load-clustering - Disable PostRA load clustering in the machine scheduler. 
+; CHECK-NEXT: disable-postmisched-store-clustering - Disable PostRA store clustering in the machine scheduler. ; CHECK-NEXT: dlen-factor-2 - Vector unit DLEN(data path width) is half of VLEN. ; CHECK-NEXT: e - 'E' (Embedded Instruction Set with 16 GPRs). ; CHECK-NEXT: exact-asm - Enable Exact Assembly (Disables Compression and Relaxation). @@ -58,6 +66,7 @@ ; CHECK-NEXT: ld-add-fusion - Enable LD+ADD macrofusion. ; CHECK-NEXT: log-vrgather - Has vrgather.vv with LMUL*log2(LMUL) latency ; CHECK-NEXT: lui-addi-fusion - Enable LUI+ADDI macro fusion. +; CHECK-NEXT: lui-load-fusion - Enable LUI + load macrofusion. ; CHECK-NEXT: m - 'M' (Integer Multiplication and Division). ; CHECK-NEXT: mips-p8700 - MIPS p8700 processor. ; CHECK-NEXT: no-default-unroll - Disable default unroll preference.. @@ -130,6 +139,7 @@ ; CHECK-NEXT: shvsatpa - 'Shvsatpa' (vsatp supports all modes supported by satp). ; CHECK-NEXT: shvstvala - 'Shvstvala' (vstval provides all needed values). ; CHECK-NEXT: shvstvecd - 'Shvstvecd' (vstvec supports Direct mode). +; CHECK-NEXT: shxadd-load-fusion - Enable SH(1|2|3)ADD(.UW) + load macrofusion. ; CHECK-NEXT: sifive7 - SiFive 7-Series processors. ; CHECK-NEXT: smaia - 'Smaia' (Advanced Interrupt Architecture Machine Level). ; CHECK-NEXT: smcdeleg - 'Smcdeleg' (Counter Delegation Machine Level). diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll index 117e3e4..519f1e8 100644 --- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll @@ -1110,15 +1110,15 @@ define i64 @stest_f64i64(double %x) { ; RV32IF-NEXT: .LBB18_3: # %entry ; RV32IF-NEXT: slti a6, a4, 0 ; RV32IF-NEXT: .LBB18_4: # %entry -; RV32IF-NEXT: addi a7, a6, -1 -; RV32IF-NEXT: neg t0, a6 +; RV32IF-NEXT: neg a7, a6 +; RV32IF-NEXT: addi t0, a6, -1 ; RV32IF-NEXT: bnez a6, .LBB18_6 ; RV32IF-NEXT: # %bb.5: # %entry ; RV32IF-NEXT: mv a1, a5 ; RV32IF-NEXT: .LBB18_6: # %entry -; RV32IF-NEXT: or a3, a7, a3 -; RV32IF-NEXT: and a4, t0, a4 -; RV32IF-NEXT: and a2, t0, a2 +; RV32IF-NEXT: or a3, t0, a3 +; RV32IF-NEXT: and a4, a7, a4 +; RV32IF-NEXT: and a2, a7, a2 ; RV32IF-NEXT: beq a1, a0, .LBB18_8 ; RV32IF-NEXT: # %bb.7: # %entry ; RV32IF-NEXT: sltu a0, a0, a1 @@ -1213,15 +1213,15 @@ define i64 @stest_f64i64(double %x) { ; RV32IFD-NEXT: .LBB18_3: # %entry ; RV32IFD-NEXT: slti a6, a4, 0 ; RV32IFD-NEXT: .LBB18_4: # %entry -; RV32IFD-NEXT: addi a7, a6, -1 -; RV32IFD-NEXT: neg t0, a6 +; RV32IFD-NEXT: neg a7, a6 +; RV32IFD-NEXT: addi t0, a6, -1 ; RV32IFD-NEXT: bnez a6, .LBB18_6 ; RV32IFD-NEXT: # %bb.5: # %entry ; RV32IFD-NEXT: mv a1, a5 ; RV32IFD-NEXT: .LBB18_6: # %entry -; RV32IFD-NEXT: or a3, a7, a3 -; RV32IFD-NEXT: and a4, t0, a4 -; RV32IFD-NEXT: and a2, t0, a2 +; RV32IFD-NEXT: or a3, t0, a3 +; RV32IFD-NEXT: and a4, a7, a4 +; RV32IFD-NEXT: and a2, a7, a2 ; RV32IFD-NEXT: beq a1, a0, .LBB18_8 ; RV32IFD-NEXT: # %bb.7: # %entry ; RV32IFD-NEXT: sltu a0, a0, a1 @@ -1378,8 +1378,8 @@ define i64 @ustest_f64i64(double %x) { ; RV32IF-NEXT: # %bb.4: # %entry ; RV32IF-NEXT: li a0, 1 ; RV32IF-NEXT: .LBB20_5: # %entry -; RV32IF-NEXT: lw a3, 8(sp) -; RV32IF-NEXT: lw a4, 12(sp) +; RV32IF-NEXT: lw a4, 8(sp) +; RV32IF-NEXT: lw a3, 12(sp) ; RV32IF-NEXT: and a5, a2, a1 ; RV32IF-NEXT: beqz a5, .LBB20_7 ; RV32IF-NEXT: # %bb.6: # %entry @@ -1393,12 +1393,12 @@ define i64 @ustest_f64i64(double %x) { ; RV32IF-NEXT: and a2, a2, a3 ; RV32IF-NEXT: bnez a0, .LBB20_10 ; RV32IF-NEXT: # %bb.9: -; RV32IF-NEXT: or a0, a2, a4 +; RV32IF-NEXT: or a0, a4, a2 ; RV32IF-NEXT: snez a1, a0 ; 
RV32IF-NEXT: .LBB20_10: # %entry ; RV32IF-NEXT: neg a1, a1 -; RV32IF-NEXT: and a0, a1, a2 -; RV32IF-NEXT: and a1, a1, a4 +; RV32IF-NEXT: and a0, a1, a4 +; RV32IF-NEXT: and a1, a1, a2 ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: .cfi_restore ra ; RV32IF-NEXT: addi sp, sp, 32 @@ -1461,8 +1461,8 @@ define i64 @ustest_f64i64(double %x) { ; RV32IFD-NEXT: # %bb.4: # %entry ; RV32IFD-NEXT: li a0, 1 ; RV32IFD-NEXT: .LBB20_5: # %entry -; RV32IFD-NEXT: lw a3, 8(sp) -; RV32IFD-NEXT: lw a4, 12(sp) +; RV32IFD-NEXT: lw a4, 8(sp) +; RV32IFD-NEXT: lw a3, 12(sp) ; RV32IFD-NEXT: and a5, a2, a1 ; RV32IFD-NEXT: beqz a5, .LBB20_7 ; RV32IFD-NEXT: # %bb.6: # %entry @@ -1476,12 +1476,12 @@ define i64 @ustest_f64i64(double %x) { ; RV32IFD-NEXT: and a2, a2, a3 ; RV32IFD-NEXT: bnez a0, .LBB20_10 ; RV32IFD-NEXT: # %bb.9: -; RV32IFD-NEXT: or a0, a2, a4 +; RV32IFD-NEXT: or a0, a4, a2 ; RV32IFD-NEXT: snez a1, a0 ; RV32IFD-NEXT: .LBB20_10: # %entry ; RV32IFD-NEXT: neg a1, a1 -; RV32IFD-NEXT: and a0, a1, a2 -; RV32IFD-NEXT: and a1, a1, a4 +; RV32IFD-NEXT: and a0, a1, a4 +; RV32IFD-NEXT: and a1, a1, a2 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: .cfi_restore ra ; RV32IFD-NEXT: addi sp, sp, 32 @@ -1525,15 +1525,15 @@ define i64 @stest_f32i64(float %x) { ; RV32-NEXT: .LBB21_3: # %entry ; RV32-NEXT: slti a6, a4, 0 ; RV32-NEXT: .LBB21_4: # %entry -; RV32-NEXT: addi a7, a6, -1 -; RV32-NEXT: neg t0, a6 +; RV32-NEXT: neg a7, a6 +; RV32-NEXT: addi t0, a6, -1 ; RV32-NEXT: bnez a6, .LBB21_6 ; RV32-NEXT: # %bb.5: # %entry ; RV32-NEXT: mv a1, a5 ; RV32-NEXT: .LBB21_6: # %entry -; RV32-NEXT: or a3, a7, a3 -; RV32-NEXT: and a4, t0, a4 -; RV32-NEXT: and a2, t0, a2 +; RV32-NEXT: or a3, t0, a3 +; RV32-NEXT: and a4, a7, a4 +; RV32-NEXT: and a2, a7, a2 ; RV32-NEXT: beq a1, a0, .LBB21_8 ; RV32-NEXT: # %bb.7: # %entry ; RV32-NEXT: sltu a0, a0, a1 @@ -1658,8 +1658,8 @@ define i64 @ustest_f32i64(float %x) { ; RV32-NEXT: # %bb.4: # %entry ; RV32-NEXT: li a0, 1 ; RV32-NEXT: .LBB23_5: # %entry -; RV32-NEXT: lw a3, 8(sp) -; RV32-NEXT: lw a4, 12(sp) +; RV32-NEXT: lw a4, 8(sp) +; RV32-NEXT: lw a3, 12(sp) ; RV32-NEXT: and a5, a2, a1 ; RV32-NEXT: beqz a5, .LBB23_7 ; RV32-NEXT: # %bb.6: # %entry @@ -1673,12 +1673,12 @@ define i64 @ustest_f32i64(float %x) { ; RV32-NEXT: and a2, a2, a3 ; RV32-NEXT: bnez a0, .LBB23_10 ; RV32-NEXT: # %bb.9: -; RV32-NEXT: or a0, a2, a4 +; RV32-NEXT: or a0, a4, a2 ; RV32-NEXT: snez a1, a0 ; RV32-NEXT: .LBB23_10: # %entry ; RV32-NEXT: neg a1, a1 -; RV32-NEXT: and a0, a1, a2 -; RV32-NEXT: and a1, a1, a4 +; RV32-NEXT: and a0, a1, a4 +; RV32-NEXT: and a1, a1, a2 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 @@ -1752,15 +1752,15 @@ define i64 @stest_f16i64(half %x) { ; RV32-NEXT: .LBB24_3: # %entry ; RV32-NEXT: slti a6, a4, 0 ; RV32-NEXT: .LBB24_4: # %entry -; RV32-NEXT: addi a7, a6, -1 -; RV32-NEXT: neg t0, a6 +; RV32-NEXT: neg a7, a6 +; RV32-NEXT: addi t0, a6, -1 ; RV32-NEXT: bnez a6, .LBB24_6 ; RV32-NEXT: # %bb.5: # %entry ; RV32-NEXT: mv a1, a5 ; RV32-NEXT: .LBB24_6: # %entry -; RV32-NEXT: or a3, a7, a3 -; RV32-NEXT: and a4, t0, a4 -; RV32-NEXT: and a2, t0, a2 +; RV32-NEXT: or a3, t0, a3 +; RV32-NEXT: and a4, a7, a4 +; RV32-NEXT: and a2, a7, a2 ; RV32-NEXT: beq a1, a0, .LBB24_8 ; RV32-NEXT: # %bb.7: # %entry ; RV32-NEXT: sltu a0, a0, a1 @@ -1921,8 +1921,8 @@ define i64 @ustest_f16i64(half %x) { ; RV32-NEXT: # %bb.4: # %entry ; RV32-NEXT: li a0, 1 ; RV32-NEXT: .LBB26_5: # %entry -; RV32-NEXT: lw a3, 8(sp) -; RV32-NEXT: lw 
a4, 12(sp) +; RV32-NEXT: lw a4, 8(sp) +; RV32-NEXT: lw a3, 12(sp) ; RV32-NEXT: and a5, a2, a1 ; RV32-NEXT: beqz a5, .LBB26_7 ; RV32-NEXT: # %bb.6: # %entry @@ -1936,12 +1936,12 @@ define i64 @ustest_f16i64(half %x) { ; RV32-NEXT: and a2, a2, a3 ; RV32-NEXT: bnez a0, .LBB26_10 ; RV32-NEXT: # %bb.9: -; RV32-NEXT: or a0, a2, a4 +; RV32-NEXT: or a0, a4, a2 ; RV32-NEXT: snez a1, a0 ; RV32-NEXT: .LBB26_10: # %entry ; RV32-NEXT: neg a1, a1 -; RV32-NEXT: and a0, a1, a2 -; RV32-NEXT: and a1, a1, a4 +; RV32-NEXT: and a0, a1, a4 +; RV32-NEXT: and a1, a1, a2 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 @@ -3046,15 +3046,15 @@ define i64 @stest_f64i64_mm(double %x) { ; RV32IF-NEXT: .LBB45_3: # %entry ; RV32IF-NEXT: slti a6, a4, 0 ; RV32IF-NEXT: .LBB45_4: # %entry -; RV32IF-NEXT: addi a7, a6, -1 -; RV32IF-NEXT: neg t0, a6 +; RV32IF-NEXT: neg a7, a6 +; RV32IF-NEXT: addi t0, a6, -1 ; RV32IF-NEXT: bnez a6, .LBB45_6 ; RV32IF-NEXT: # %bb.5: # %entry ; RV32IF-NEXT: mv a1, a5 ; RV32IF-NEXT: .LBB45_6: # %entry -; RV32IF-NEXT: or a3, a7, a3 -; RV32IF-NEXT: and a4, t0, a4 -; RV32IF-NEXT: and a2, t0, a2 +; RV32IF-NEXT: or a3, t0, a3 +; RV32IF-NEXT: and a4, a7, a4 +; RV32IF-NEXT: and a2, a7, a2 ; RV32IF-NEXT: beq a1, a0, .LBB45_8 ; RV32IF-NEXT: # %bb.7: # %entry ; RV32IF-NEXT: sltu a0, a0, a1 @@ -3149,15 +3149,15 @@ define i64 @stest_f64i64_mm(double %x) { ; RV32IFD-NEXT: .LBB45_3: # %entry ; RV32IFD-NEXT: slti a6, a4, 0 ; RV32IFD-NEXT: .LBB45_4: # %entry -; RV32IFD-NEXT: addi a7, a6, -1 -; RV32IFD-NEXT: neg t0, a6 +; RV32IFD-NEXT: neg a7, a6 +; RV32IFD-NEXT: addi t0, a6, -1 ; RV32IFD-NEXT: bnez a6, .LBB45_6 ; RV32IFD-NEXT: # %bb.5: # %entry ; RV32IFD-NEXT: mv a1, a5 ; RV32IFD-NEXT: .LBB45_6: # %entry -; RV32IFD-NEXT: or a3, a7, a3 -; RV32IFD-NEXT: and a4, t0, a4 -; RV32IFD-NEXT: and a2, t0, a2 +; RV32IFD-NEXT: or a3, t0, a3 +; RV32IFD-NEXT: and a4, a7, a4 +; RV32IFD-NEXT: and a2, a7, a2 ; RV32IFD-NEXT: beq a1, a0, .LBB45_8 ; RV32IFD-NEXT: # %bb.7: # %entry ; RV32IFD-NEXT: sltu a0, a0, a1 @@ -3292,30 +3292,30 @@ define i64 @ustest_f64i64_mm(double %x) { ; RV32IF-NEXT: mv a1, a0 ; RV32IF-NEXT: addi a0, sp, 8 ; RV32IF-NEXT: call __fixdfti -; RV32IF-NEXT: lw a0, 20(sp) -; RV32IF-NEXT: lw a1, 8(sp) -; RV32IF-NEXT: lw a2, 12(sp) +; RV32IF-NEXT: lw a0, 8(sp) +; RV32IF-NEXT: lw a1, 12(sp) +; RV32IF-NEXT: lw a2, 20(sp) ; RV32IF-NEXT: lw a3, 16(sp) -; RV32IF-NEXT: beqz a0, .LBB47_2 +; RV32IF-NEXT: beqz a2, .LBB47_2 ; RV32IF-NEXT: # %bb.1: # %entry -; RV32IF-NEXT: slti a4, a0, 0 +; RV32IF-NEXT: slti a4, a2, 0 ; RV32IF-NEXT: j .LBB47_3 ; RV32IF-NEXT: .LBB47_2: ; RV32IF-NEXT: seqz a4, a3 ; RV32IF-NEXT: .LBB47_3: # %entry ; RV32IF-NEXT: xori a3, a3, 1 -; RV32IF-NEXT: or a3, a3, a0 +; RV32IF-NEXT: or a3, a3, a2 ; RV32IF-NEXT: seqz a3, a3 ; RV32IF-NEXT: addi a3, a3, -1 ; RV32IF-NEXT: and a3, a3, a4 ; RV32IF-NEXT: neg a3, a3 -; RV32IF-NEXT: and a2, a3, a2 ; RV32IF-NEXT: and a1, a3, a1 ; RV32IF-NEXT: and a0, a3, a0 -; RV32IF-NEXT: slti a0, a0, 0 -; RV32IF-NEXT: addi a3, a0, -1 -; RV32IF-NEXT: and a0, a3, a1 -; RV32IF-NEXT: and a1, a3, a2 +; RV32IF-NEXT: and a2, a3, a2 +; RV32IF-NEXT: slti a2, a2, 0 +; RV32IF-NEXT: addi a2, a2, -1 +; RV32IF-NEXT: and a0, a2, a0 +; RV32IF-NEXT: and a1, a2, a1 ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: .cfi_restore ra ; RV32IF-NEXT: addi sp, sp, 32 @@ -3354,30 +3354,30 @@ define i64 @ustest_f64i64_mm(double %x) { ; RV32IFD-NEXT: .cfi_offset ra, -4 ; RV32IFD-NEXT: addi a0, sp, 8 ; RV32IFD-NEXT: call __fixdfti -; 
RV32IFD-NEXT: lw a0, 20(sp) -; RV32IFD-NEXT: lw a1, 8(sp) -; RV32IFD-NEXT: lw a2, 12(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: lw a2, 20(sp) ; RV32IFD-NEXT: lw a3, 16(sp) -; RV32IFD-NEXT: beqz a0, .LBB47_2 +; RV32IFD-NEXT: beqz a2, .LBB47_2 ; RV32IFD-NEXT: # %bb.1: # %entry -; RV32IFD-NEXT: slti a4, a0, 0 +; RV32IFD-NEXT: slti a4, a2, 0 ; RV32IFD-NEXT: j .LBB47_3 ; RV32IFD-NEXT: .LBB47_2: ; RV32IFD-NEXT: seqz a4, a3 ; RV32IFD-NEXT: .LBB47_3: # %entry ; RV32IFD-NEXT: xori a3, a3, 1 -; RV32IFD-NEXT: or a3, a3, a0 +; RV32IFD-NEXT: or a3, a3, a2 ; RV32IFD-NEXT: seqz a3, a3 ; RV32IFD-NEXT: addi a3, a3, -1 ; RV32IFD-NEXT: and a3, a3, a4 ; RV32IFD-NEXT: neg a3, a3 -; RV32IFD-NEXT: and a2, a3, a2 ; RV32IFD-NEXT: and a1, a3, a1 ; RV32IFD-NEXT: and a0, a3, a0 -; RV32IFD-NEXT: slti a0, a0, 0 -; RV32IFD-NEXT: addi a3, a0, -1 -; RV32IFD-NEXT: and a0, a3, a1 -; RV32IFD-NEXT: and a1, a3, a2 +; RV32IFD-NEXT: and a2, a3, a2 +; RV32IFD-NEXT: slti a2, a2, 0 +; RV32IFD-NEXT: addi a2, a2, -1 +; RV32IFD-NEXT: and a0, a2, a0 +; RV32IFD-NEXT: and a1, a2, a1 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: .cfi_restore ra ; RV32IFD-NEXT: addi sp, sp, 32 @@ -3419,15 +3419,15 @@ define i64 @stest_f32i64_mm(float %x) { ; RV32-NEXT: .LBB48_3: # %entry ; RV32-NEXT: slti a6, a4, 0 ; RV32-NEXT: .LBB48_4: # %entry -; RV32-NEXT: addi a7, a6, -1 -; RV32-NEXT: neg t0, a6 +; RV32-NEXT: neg a7, a6 +; RV32-NEXT: addi t0, a6, -1 ; RV32-NEXT: bnez a6, .LBB48_6 ; RV32-NEXT: # %bb.5: # %entry ; RV32-NEXT: mv a1, a5 ; RV32-NEXT: .LBB48_6: # %entry -; RV32-NEXT: or a3, a7, a3 -; RV32-NEXT: and a4, t0, a4 -; RV32-NEXT: and a2, t0, a2 +; RV32-NEXT: or a3, t0, a3 +; RV32-NEXT: and a4, a7, a4 +; RV32-NEXT: and a2, a7, a2 ; RV32-NEXT: beq a1, a0, .LBB48_8 ; RV32-NEXT: # %bb.7: # %entry ; RV32-NEXT: sltu a0, a0, a1 @@ -3530,30 +3530,30 @@ define i64 @ustest_f32i64_mm(float %x) { ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixsfti -; RV32-NEXT: lw a0, 20(sp) -; RV32-NEXT: lw a1, 8(sp) -; RV32-NEXT: lw a2, 12(sp) +; RV32-NEXT: lw a0, 8(sp) +; RV32-NEXT: lw a1, 12(sp) +; RV32-NEXT: lw a2, 20(sp) ; RV32-NEXT: lw a3, 16(sp) -; RV32-NEXT: beqz a0, .LBB50_2 +; RV32-NEXT: beqz a2, .LBB50_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: slti a4, a0, 0 +; RV32-NEXT: slti a4, a2, 0 ; RV32-NEXT: j .LBB50_3 ; RV32-NEXT: .LBB50_2: ; RV32-NEXT: seqz a4, a3 ; RV32-NEXT: .LBB50_3: # %entry ; RV32-NEXT: xori a3, a3, 1 -; RV32-NEXT: or a3, a3, a0 +; RV32-NEXT: or a3, a3, a2 ; RV32-NEXT: seqz a3, a3 ; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: and a3, a3, a4 ; RV32-NEXT: neg a3, a3 -; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: and a1, a3, a1 ; RV32-NEXT: and a0, a3, a0 -; RV32-NEXT: slti a0, a0, 0 -; RV32-NEXT: addi a3, a0, -1 -; RV32-NEXT: and a0, a3, a1 -; RV32-NEXT: and a1, a3, a2 +; RV32-NEXT: and a2, a3, a2 +; RV32-NEXT: slti a2, a2, 0 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a0, a2, a0 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 @@ -3620,15 +3620,15 @@ define i64 @stest_f16i64_mm(half %x) { ; RV32-NEXT: .LBB51_3: # %entry ; RV32-NEXT: slti a6, a4, 0 ; RV32-NEXT: .LBB51_4: # %entry -; RV32-NEXT: addi a7, a6, -1 -; RV32-NEXT: neg t0, a6 +; RV32-NEXT: neg a7, a6 +; RV32-NEXT: addi t0, a6, -1 ; RV32-NEXT: bnez a6, .LBB51_6 ; RV32-NEXT: # %bb.5: # %entry ; RV32-NEXT: mv a1, a5 ; RV32-NEXT: .LBB51_6: # %entry -; RV32-NEXT: or a3, a7, a3 -; RV32-NEXT: and a4, t0, a4 -; 
RV32-NEXT: and a2, t0, a2 +; RV32-NEXT: or a3, t0, a3 +; RV32-NEXT: and a4, a7, a4 +; RV32-NEXT: and a2, a7, a2 ; RV32-NEXT: beq a1, a0, .LBB51_8 ; RV32-NEXT: # %bb.7: # %entry ; RV32-NEXT: sltu a0, a0, a1 @@ -3767,30 +3767,30 @@ define i64 @ustest_f16i64_mm(half %x) { ; RV32-NEXT: call __extendhfsf2 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixsfti -; RV32-NEXT: lw a0, 20(sp) -; RV32-NEXT: lw a1, 8(sp) -; RV32-NEXT: lw a2, 12(sp) +; RV32-NEXT: lw a0, 8(sp) +; RV32-NEXT: lw a1, 12(sp) +; RV32-NEXT: lw a2, 20(sp) ; RV32-NEXT: lw a3, 16(sp) -; RV32-NEXT: beqz a0, .LBB53_2 +; RV32-NEXT: beqz a2, .LBB53_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: slti a4, a0, 0 +; RV32-NEXT: slti a4, a2, 0 ; RV32-NEXT: j .LBB53_3 ; RV32-NEXT: .LBB53_2: ; RV32-NEXT: seqz a4, a3 ; RV32-NEXT: .LBB53_3: # %entry ; RV32-NEXT: xori a3, a3, 1 -; RV32-NEXT: or a3, a3, a0 +; RV32-NEXT: or a3, a3, a2 ; RV32-NEXT: seqz a3, a3 ; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: and a3, a3, a4 ; RV32-NEXT: neg a3, a3 -; RV32-NEXT: and a2, a3, a2 ; RV32-NEXT: and a1, a3, a1 ; RV32-NEXT: and a0, a3, a0 -; RV32-NEXT: slti a0, a0, 0 -; RV32-NEXT: addi a3, a0, -1 -; RV32-NEXT: and a0, a3, a1 -; RV32-NEXT: and a1, a3, a2 +; RV32-NEXT: and a2, a3, a2 +; RV32-NEXT: slti a2, a2, 0 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: and a0, a2, a0 +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 diff --git a/llvm/test/CodeGen/RISCV/iabs.ll b/llvm/test/CodeGen/RISCV/iabs.ll index 774f1a1..c157c63 100644 --- a/llvm/test/CodeGen/RISCV/iabs.ll +++ b/llvm/test/CodeGen/RISCV/iabs.ll @@ -301,58 +301,58 @@ define i64 @select_abs64(i64 %x) { define i128 @abs128(i128 %x) { ; RV32I-LABEL: abs128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 12(a1) -; RV32I-NEXT: lw a2, 0(a1) +; RV32I-NEXT: lw a2, 12(a1) +; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: lw a4, 4(a1) ; RV32I-NEXT: lw a1, 8(a1) -; RV32I-NEXT: bgez a3, .LBB8_2 +; RV32I-NEXT: bgez a2, .LBB8_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: neg a5, a1 ; RV32I-NEXT: snez a6, a4 -; RV32I-NEXT: snez a7, a2 +; RV32I-NEXT: snez a7, a3 ; RV32I-NEXT: snez a1, a1 ; RV32I-NEXT: neg a4, a4 ; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: add a1, a3, a1 +; RV32I-NEXT: add a1, a2, a1 ; RV32I-NEXT: sub a4, a4, a7 -; RV32I-NEXT: sltu a3, a5, a6 +; RV32I-NEXT: sltu a2, a5, a6 ; RV32I-NEXT: neg a7, a1 ; RV32I-NEXT: sub a1, a5, a6 -; RV32I-NEXT: sub a3, a7, a3 -; RV32I-NEXT: neg a2, a2 +; RV32I-NEXT: sub a2, a7, a2 +; RV32I-NEXT: neg a3, a3 ; RV32I-NEXT: .LBB8_2: -; RV32I-NEXT: sw a2, 0(a0) +; RV32I-NEXT: sw a3, 0(a0) ; RV32I-NEXT: sw a4, 4(a0) ; RV32I-NEXT: sw a1, 8(a0) -; RV32I-NEXT: sw a3, 12(a0) +; RV32I-NEXT: sw a2, 12(a0) ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: abs128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 12(a1) -; RV32ZBB-NEXT: lw a2, 0(a1) +; RV32ZBB-NEXT: lw a2, 12(a1) +; RV32ZBB-NEXT: lw a3, 0(a1) ; RV32ZBB-NEXT: lw a4, 4(a1) ; RV32ZBB-NEXT: lw a1, 8(a1) -; RV32ZBB-NEXT: bgez a3, .LBB8_2 +; RV32ZBB-NEXT: bgez a2, .LBB8_2 ; RV32ZBB-NEXT: # %bb.1: ; RV32ZBB-NEXT: neg a5, a1 ; RV32ZBB-NEXT: snez a6, a4 -; RV32ZBB-NEXT: snez a7, a2 +; RV32ZBB-NEXT: snez a7, a3 ; RV32ZBB-NEXT: snez a1, a1 ; RV32ZBB-NEXT: neg a4, a4 ; RV32ZBB-NEXT: or a6, a7, a6 -; RV32ZBB-NEXT: add a1, a3, a1 +; RV32ZBB-NEXT: add a1, a2, a1 ; RV32ZBB-NEXT: sub a4, a4, a7 -; RV32ZBB-NEXT: sltu a3, a5, a6 +; RV32ZBB-NEXT: sltu a2, a5, a6 ; RV32ZBB-NEXT: neg a7, a1 ; RV32ZBB-NEXT: sub a1, a5, a6 -; RV32ZBB-NEXT: sub a3, a7, a3 -; RV32ZBB-NEXT: neg a2, a2 +; RV32ZBB-NEXT: sub a2, 
a7, a2 +; RV32ZBB-NEXT: neg a3, a3 ; RV32ZBB-NEXT: .LBB8_2: -; RV32ZBB-NEXT: sw a2, 0(a0) +; RV32ZBB-NEXT: sw a3, 0(a0) ; RV32ZBB-NEXT: sw a4, 4(a0) ; RV32ZBB-NEXT: sw a1, 8(a0) -; RV32ZBB-NEXT: sw a3, 12(a0) +; RV32ZBB-NEXT: sw a2, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64I-LABEL: abs128: @@ -383,58 +383,58 @@ define i128 @abs128(i128 %x) { define i128 @select_abs128(i128 %x) { ; RV32I-LABEL: select_abs128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 12(a1) -; RV32I-NEXT: lw a2, 0(a1) +; RV32I-NEXT: lw a2, 12(a1) +; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: lw a4, 4(a1) ; RV32I-NEXT: lw a1, 8(a1) -; RV32I-NEXT: bgez a3, .LBB9_2 +; RV32I-NEXT: bgez a2, .LBB9_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: neg a5, a1 ; RV32I-NEXT: snez a6, a4 -; RV32I-NEXT: snez a7, a2 +; RV32I-NEXT: snez a7, a3 ; RV32I-NEXT: snez a1, a1 ; RV32I-NEXT: neg a4, a4 ; RV32I-NEXT: or a6, a7, a6 -; RV32I-NEXT: add a1, a3, a1 +; RV32I-NEXT: add a1, a2, a1 ; RV32I-NEXT: sub a4, a4, a7 -; RV32I-NEXT: sltu a3, a5, a6 +; RV32I-NEXT: sltu a2, a5, a6 ; RV32I-NEXT: neg a7, a1 ; RV32I-NEXT: sub a1, a5, a6 -; RV32I-NEXT: sub a3, a7, a3 -; RV32I-NEXT: neg a2, a2 +; RV32I-NEXT: sub a2, a7, a2 +; RV32I-NEXT: neg a3, a3 ; RV32I-NEXT: .LBB9_2: -; RV32I-NEXT: sw a2, 0(a0) +; RV32I-NEXT: sw a3, 0(a0) ; RV32I-NEXT: sw a4, 4(a0) ; RV32I-NEXT: sw a1, 8(a0) -; RV32I-NEXT: sw a3, 12(a0) +; RV32I-NEXT: sw a2, 12(a0) ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: select_abs128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 12(a1) -; RV32ZBB-NEXT: lw a2, 0(a1) +; RV32ZBB-NEXT: lw a2, 12(a1) +; RV32ZBB-NEXT: lw a3, 0(a1) ; RV32ZBB-NEXT: lw a4, 4(a1) ; RV32ZBB-NEXT: lw a1, 8(a1) -; RV32ZBB-NEXT: bgez a3, .LBB9_2 +; RV32ZBB-NEXT: bgez a2, .LBB9_2 ; RV32ZBB-NEXT: # %bb.1: ; RV32ZBB-NEXT: neg a5, a1 ; RV32ZBB-NEXT: snez a6, a4 -; RV32ZBB-NEXT: snez a7, a2 +; RV32ZBB-NEXT: snez a7, a3 ; RV32ZBB-NEXT: snez a1, a1 ; RV32ZBB-NEXT: neg a4, a4 ; RV32ZBB-NEXT: or a6, a7, a6 -; RV32ZBB-NEXT: add a1, a3, a1 +; RV32ZBB-NEXT: add a1, a2, a1 ; RV32ZBB-NEXT: sub a4, a4, a7 -; RV32ZBB-NEXT: sltu a3, a5, a6 +; RV32ZBB-NEXT: sltu a2, a5, a6 ; RV32ZBB-NEXT: neg a7, a1 ; RV32ZBB-NEXT: sub a1, a5, a6 -; RV32ZBB-NEXT: sub a3, a7, a3 -; RV32ZBB-NEXT: neg a2, a2 +; RV32ZBB-NEXT: sub a2, a7, a2 +; RV32ZBB-NEXT: neg a3, a3 ; RV32ZBB-NEXT: .LBB9_2: -; RV32ZBB-NEXT: sw a2, 0(a0) +; RV32ZBB-NEXT: sw a3, 0(a0) ; RV32ZBB-NEXT: sw a4, 4(a0) ; RV32ZBB-NEXT: sw a1, 8(a0) -; RV32ZBB-NEXT: sw a3, 12(a0) +; RV32ZBB-NEXT: sw a2, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64I-LABEL: select_abs128: diff --git a/llvm/test/CodeGen/RISCV/macro-fusions.mir b/llvm/test/CodeGen/RISCV/macro-fusions.mir index 1346414..ae5b52d 100644 --- a/llvm/test/CodeGen/RISCV/macro-fusions.mir +++ b/llvm/test/CodeGen/RISCV/macro-fusions.mir @@ -2,7 +2,12 @@ # RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \ # RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \ # RUN: -mattr=+lui-addi-fusion,+auipc-addi-fusion,+zexth-fusion,+zextw-fusion,+shifted-zextw-fusion,+ld-add-fusion \ +# RUN: -mattr=+add-load-fusion,+auipc-load-fusion,+lui-load-fusion,+addi-load-fusion \ +# RUN: -mattr=+zba,+shxadd-load-fusion \ # RUN: | FileCheck %s +# RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \ +# RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \ +# RUN: -mattr=+zba,+bfext-fusion | FileCheck --check-prefixes=CHECK-BFEXT %s # CHECK: lui_addi:%bb.0 # CHECK: Macro fuse: {{.*}}LUI - ADDI @@ -174,3 +179,1374 @@ body: | $x11 = COPY %5 PseudoRET ... 
+ +# CHECK: add_lb +# CHECK: Macro fuse: {{.*}}ADD - LB +--- +name: add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lh +# CHECK: Macro fuse: {{.*}}ADD - LH +--- +name: add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lw +# CHECK: Macro fuse: {{.*}}ADD - LW +--- +name: add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lbu +# CHECK: Macro fuse: {{.*}}ADD - LBU +--- +name: add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lhu +# CHECK: Macro fuse: {{.*}}ADD - LHU +--- +name: add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lwu +# CHECK: Macro fuse: {{.*}}ADD - LWU +--- +name: add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: auipc_lb +# CHECK: Macro fuse: {{.*}}AUIPC - LB +--- +name: auipc_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LB %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lh +# CHECK: Macro fuse: {{.*}}AUIPC - LH +--- +name: auipc_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LH %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lw +# CHECK: Macro fuse: {{.*}}AUIPC - LW +--- +name: auipc_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LW %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_ld +# CHECK: Macro fuse: {{.*}}AUIPC - LD +--- +name: auipc_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LD %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lbu +# CHECK: Macro fuse: {{.*}}AUIPC - LBU +--- +name: auipc_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LBU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lhu +# CHECK: Macro fuse: {{.*}}AUIPC - LHU +--- +name: auipc_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LHU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... 
+ +# CHECK: auipc_lwu +# CHECK: Macro fuse: {{.*}}AUIPC - LWU +--- +name: auipc_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LWU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lb +# CHECK: Macro fuse: {{.*}}LUI - LB +--- +name: lui_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LB %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lh +# CHECK: Macro fuse: {{.*}}LUI - LH +--- +name: lui_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LH %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lw +# CHECK: Macro fuse: {{.*}}LUI - LW +--- +name: lui_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LW %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_ld +# CHECK: Macro fuse: {{.*}}LUI - LD +--- +name: lui_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LD %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lbu +# CHECK: Macro fuse: {{.*}}LUI - LBU +--- +name: lui_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LBU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lhu +# CHECK: Macro fuse: {{.*}}LUI - LHU +--- +name: lui_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LHU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lwu +# CHECK: Macro fuse: {{.*}}LUI - LWU +--- +name: lui_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LWU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK-BFEXT: bitfield_extract +# CHECK-BFEXT: Macro fuse: {{.*}}SLLI - SRLI +--- +name: bitfield_extract +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = SLLI %1, 31 + %3:gpr = XORI %1, 3 + %4:gpr = SRLI %2, 48 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: addi_lb +# CHECK: Macro fuse: {{.*}}ADDI - LB +--- +name: addi_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lh +# CHECK: Macro fuse: {{.*}}ADDI - LH +--- +name: addi_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lw +# CHECK: Macro fuse: {{.*}}ADDI - LW +--- +name: addi_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: addi_ld +# CHECK: Macro fuse: {{.*}}ADDI - LD +--- +name: addi_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lbu +# CHECK: Macro fuse: {{.*}}ADDI - LBU +--- +name: addi_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lhu +# CHECK: Macro fuse: {{.*}}ADDI - LHU +--- +name: addi_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lwu +# CHECK: Macro fuse: {{.*}}ADDI - LWU +--- +name: addi_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lb +# CHECK: Macro fuse: {{.*}}ADD_UW - LB +--- +name: adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lh +# CHECK: Macro fuse: {{.*}}ADD_UW - LH +--- +name: adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lw +# CHECK: Macro fuse: {{.*}}ADD_UW - LW +--- +name: adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_ld +# CHECK: Macro fuse: {{.*}}ADD_UW - LD +--- +name: adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lbu +# CHECK: Macro fuse: {{.*}}ADD_UW - LBU +--- +name: adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lhu +# CHECK: Macro fuse: {{.*}}ADD_UW - LHU +--- +name: adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lwu +# CHECK: Macro fuse: {{.*}}ADD_UW - LWU +--- +name: adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh1add_lb +# CHECK: Macro fuse: {{.*}}SH1ADD - LB +--- +name: sh1add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lb +# CHECK: Macro fuse: {{.*}}SH2ADD - LB +--- +name: sh2add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lb +# CHECK: Macro fuse: {{.*}}SH3ADD - LB +--- +name: sh3add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lh +# CHECK: Macro fuse: {{.*}}SH1ADD - LH +--- +name: sh1add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lh +# CHECK: Macro fuse: {{.*}}SH2ADD - LH +--- +name: sh2add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lh +# CHECK: Macro fuse: {{.*}}SH3ADD - LH +--- +name: sh3add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lw +# CHECK: Macro fuse: {{.*}}SH1ADD - LW +--- +name: sh1add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lw +# CHECK: Macro fuse: {{.*}}SH2ADD - LW +--- +name: sh2add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lw +# CHECK: Macro fuse: {{.*}}SH3ADD - LW +--- +name: sh3add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_ld +# CHECK: Macro fuse: {{.*}}SH1ADD - LD +--- +name: sh1add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_ld +# CHECK: Macro fuse: {{.*}}SH2ADD - LD +--- +name: sh2add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh3add_ld +# CHECK: Macro fuse: {{.*}}SH3ADD - LD +--- +name: sh3add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lbu +# CHECK: Macro fuse: {{.*}}SH1ADD - LBU +--- +name: sh1add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lbu +# CHECK: Macro fuse: {{.*}}SH2ADD - LBU +--- +name: sh2add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lbu +# CHECK: Macro fuse: {{.*}}SH3ADD - LBU +--- +name: sh3add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lhu +# CHECK: Macro fuse: {{.*}}SH1ADD - LHU +--- +name: sh1add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lhu +# CHECK: Macro fuse: {{.*}}SH2ADD - LHU +--- +name: sh2add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lhu +# CHECK: Macro fuse: {{.*}}SH3ADD - LHU +--- +name: sh3add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lwu +# CHECK: Macro fuse: {{.*}}SH1ADD - LWU +--- +name: sh1add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lwu +# CHECK: Macro fuse: {{.*}}SH2ADD - LWU +--- +name: sh2add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lwu +# CHECK: Macro fuse: {{.*}}SH3ADD - LWU +--- +name: sh3add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lb +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LB +--- +name: sh1adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh2adduw_lb +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LB +--- +name: sh2adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lb +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LB +--- +name: sh3adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lh +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LH +--- +name: sh1adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lh +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LH +--- +name: sh2adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lh +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LH +--- +name: sh3adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lw +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LW +--- +name: sh1adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lw +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LW +--- +name: sh2adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lw +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LW +--- +name: sh3adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_ld +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LD +--- +name: sh1adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_ld +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LD +--- +name: sh2adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh3adduw_ld +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LD +--- +name: sh3adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lbu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LBU +--- +name: sh1adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lbu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LBU +--- +name: sh2adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lbu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LBU +--- +name: sh3adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lhu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LHU +--- +name: sh1adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lhu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LHU +--- +name: sh2adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lhu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LHU +--- +name: sh3adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lwu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LWU +--- +name: sh1adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lwu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LWU +--- +name: sh2adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lwu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LWU +--- +name: sh3adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
diff --git a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll index 160f0ae..abdc1ba 100644 --- a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll +++ b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll @@ -1,17 +1,42 @@ ; REQUIRES: asserts -; RUN: llc -mtriple=riscv32 -verify-misched -riscv-misched-load-store-clustering=false \ +; +; Disable all misched clustering +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=NOCLUSTER %s -; RUN: llc -mtriple=riscv64 -verify-misched -riscv-misched-load-store-clustering=false \ +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; +; ST misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; +; LD misched clustering only ; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=LDCLUSTER %s ; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=LDCLUSTER %s - +; +; Default misched cluster settings (i.e. 
both LD and ST clustering) +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s define i32 @load_clustering_1(ptr nocapture %p) { ; NOCLUSTER: ********** MI Scheduling ********** @@ -22,6 +47,14 @@ define i32 @load_clustering_1(ptr nocapture %p) { ; NOCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 ; NOCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 ; +; STCLUSTER: ********** MI Scheduling ********** +; STCLUSTER-LABEL: load_clustering_1:%bb.0 +; STCLUSTER: *** Final schedule for %bb.0 *** +; STCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 +; STCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 +; STCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 +; STCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 +; ; LDCLUSTER: ********** MI Scheduling ********** ; LDCLUSTER-LABEL: load_clustering_1:%bb.0 ; LDCLUSTER: *** Final schedule for %bb.0 *** @@ -29,6 +62,14 @@ define i32 @load_clustering_1(ptr nocapture %p) { ; LDCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 ; LDCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 ; LDCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 +; +; DEFAULTCLUSTER: ********** MI Scheduling ********** +; DEFAULTCLUSTER-LABEL: load_clustering_1:%bb.0 +; DEFAULTCLUSTER: *** Final schedule for %bb.0 *** +; DEFAULTCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 +; DEFAULTCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 +; DEFAULTCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 +; DEFAULTCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 entry: %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3 %val0 = load i32, ptr %arrayidx0 diff --git a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir index 21398d3..01960f9 100644 --- a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir +++ b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir @@ -1,10 +1,12 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -verify-misched -enable-post-misched=false \ -# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \ +# RUN: -mattr=+disable-postmisched-load-clustering \ +# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \ # RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \ # RUN: | FileCheck -check-prefix=NOPOSTMISCHED %s # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \ -# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \ +# RUN: -mattr=+disable-postmisched-load-clustering \ +# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \ # RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \ # RUN: | FileCheck -check-prefix=NOCLUSTER %s # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \ diff --git a/llvm/test/CodeGen/RISCV/misched-store-clustering.ll b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll new file mode 100644 index 0000000..02e853d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll @@ -0,0 +1,83 @@ +; REQUIRES: asserts +; +; Disable all misched clustering +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: 
-mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; +; ST misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; +; LD misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=LDCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=LDCLUSTER %s +; +; Default misched cluster settings (i.e. both LD and ST clustering) +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s + +define i32 @store_clustering_1(ptr nocapture %p, i32 %v) { +; NOCLUSTER: ********** MI Scheduling ********** +; NOCLUSTER-LABEL: store_clustering_1:%bb.0 +; NOCLUSTER: *** Final schedule for %bb.0 *** +; NOCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; NOCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; NOCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; NOCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; STCLUSTER: ********** MI Scheduling ********** +; STCLUSTER-LABEL: store_clustering_1:%bb.0 +; STCLUSTER: *** Final schedule for %bb.0 *** +; STCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; STCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; STCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; STCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; LDCLUSTER: ********** MI Scheduling ********** +; LDCLUSTER-LABEL: store_clustering_1:%bb.0 +; LDCLUSTER: *** Final schedule for %bb.0 *** +; LDCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; LDCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; LDCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; LDCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; DEFAULTCLUSTER: ********** MI Scheduling ********** +; DEFAULTCLUSTER-LABEL: store_clustering_1:%bb.0 +; DEFAULTCLUSTER: *** Final schedule for %bb.0 *** +; DEFAULTCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; DEFAULTCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; DEFAULTCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; DEFAULTCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: 
(store (s32) into %ir.arrayidx3) +entry: + %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3 + store i32 %v, ptr %arrayidx0 + %arrayidx1 = getelementptr inbounds i32, ptr %p, i32 2 + store i32 %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds i32, ptr %p, i32 1 + store i32 %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds i32, ptr %p, i32 4 + store i32 %v, ptr %arrayidx3 + ret i32 %v +} diff --git a/llvm/test/CodeGen/RISCV/note-gnu-property-zicfiss.ll b/llvm/test/CodeGen/RISCV/note-gnu-property-zicfiss.ll index 24d63cb..efc4439 100644 --- a/llvm/test/CodeGen/RISCV/note-gnu-property-zicfiss.ll +++ b/llvm/test/CodeGen/RISCV/note-gnu-property-zicfiss.ll @@ -7,19 +7,18 @@ ; ASM: .section ".note.GNU-stack","",@progbits ; ASM-NEXT: .section .note.gnu.property,"a",@note +; ASM32-NEXT: .p2align 2, 0x0 +; ASM64-NEXT: .p2align 3, 0x0 ; ASM-NEXT: .word 4 -; ASM-NEXT: .word .Ltmp1-.Ltmp0 +; ASM32-NEXT: .word 12 +; ASM64-NEXT: .word 16 ; ASM-NEXT: .word 5 ; ASM-NEXT: .asciz "GNU" -; ASM-NEXT: .Ltmp0: -; ASM32-NEXT: .p2align 2, 0x0 -; ASM64-NEXT: .p2align 3, 0x0 ; ASM-NEXT: .word 3221225472 ; ASM-NEXT: .word 4 ; ASM-NEXT: .word 2 ; ASM32-NEXT: .p2align 2, 0x0 ; ASM64-NEXT: .p2align 3, 0x0 -; ASM-NEXT: .Ltmp1: define i32 @f() "hw-shadow-stack" { entry: diff --git a/llvm/test/CodeGen/RISCV/rnmi-interrupt-attr-error.ll b/llvm/test/CodeGen/RISCV/rnmi-interrupt-attr-error.ll new file mode 100644 index 0000000..19cc994 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rnmi-interrupt-attr-error.ll @@ -0,0 +1,9 @@ +; RUN: not llc -mtriple riscv32-unknown-elf -mattr=-smrnmi -o - %s 2>&1 \ +; RUN: | FileCheck %s +; RUN: not llc -mtriple riscv64-unknown-elf -mattr=-smrnmi -o - %s 2>&1 \ +; RUN: | FileCheck %s + +; CHECK: LLVM ERROR: 'rnmi' interrupt kind requires Srnmi extension +define void @test_rnmi() "interrupt"="rnmi" { + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rnmi-interrupt-attr.ll b/llvm/test/CodeGen/RISCV/rnmi-interrupt-attr.ll new file mode 100644 index 0000000..03236a0 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rnmi-interrupt-attr.ll @@ -0,0 +1,373 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple riscv32-unknown-elf -mattr=+smrnmi -o - %s \ +; RUN: -verify-machineinstrs | FileCheck --check-prefix=RNMI-RV32 %s + +; RUN: llc -mtriple riscv32-unknown-elf -mattr=+smrnmi -o - %s \ +; RUN: -verify-machineinstrs -frame-pointer=all | FileCheck --check-prefix=RNMI-RV32-FP %s + +; RUN: llc -mtriple riscv64-unknown-elf -mattr=+smrnmi -o - %s \ +; RUN: -verify-machineinstrs | FileCheck --check-prefix=RNMI-RV64 %s + +; RUN: llc -mtriple riscv64-unknown-elf -mattr=+smrnmi -o - %s \ +; RUN: -verify-machineinstrs -frame-pointer=all | FileCheck --check-prefix=RNMI-RV64-FP %s + +define void @test_rnmi_empty() "interrupt"="rnmi" { +; RNMI-RV32-LABEL: test_rnmi_empty: +; RNMI-RV32: # %bb.0: +; RNMI-RV32-NEXT: mnret +; +; RNMI-RV32-FP-LABEL: test_rnmi_empty: +; RNMI-RV32-FP: # %bb.0: +; RNMI-RV32-FP-NEXT: addi sp, sp, -16 +; RNMI-RV32-FP-NEXT: .cfi_def_cfa_offset 16 +; RNMI-RV32-FP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: .cfi_offset ra, -4 +; RNMI-RV32-FP-NEXT: .cfi_offset s0, -8 +; RNMI-RV32-FP-NEXT: addi s0, sp, 16 +; RNMI-RV32-FP-NEXT: .cfi_def_cfa s0, 0 +; RNMI-RV32-FP-NEXT: .cfi_def_cfa sp, 16 +; RNMI-RV32-FP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: .cfi_restore 
ra +; RNMI-RV32-FP-NEXT: .cfi_restore s0 +; RNMI-RV32-FP-NEXT: addi sp, sp, 16 +; RNMI-RV32-FP-NEXT: .cfi_def_cfa_offset 0 +; RNMI-RV32-FP-NEXT: mnret +; +; RNMI-RV64-LABEL: test_rnmi_empty: +; RNMI-RV64: # %bb.0: +; RNMI-RV64-NEXT: mnret +; +; RNMI-RV64-FP-LABEL: test_rnmi_empty: +; RNMI-RV64-FP: # %bb.0: +; RNMI-RV64-FP-NEXT: addi sp, sp, -16 +; RNMI-RV64-FP-NEXT: .cfi_def_cfa_offset 16 +; RNMI-RV64-FP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: .cfi_offset ra, -8 +; RNMI-RV64-FP-NEXT: .cfi_offset s0, -16 +; RNMI-RV64-FP-NEXT: addi s0, sp, 16 +; RNMI-RV64-FP-NEXT: .cfi_def_cfa s0, 0 +; RNMI-RV64-FP-NEXT: .cfi_def_cfa sp, 16 +; RNMI-RV64-FP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: .cfi_restore ra +; RNMI-RV64-FP-NEXT: .cfi_restore s0 +; RNMI-RV64-FP-NEXT: addi sp, sp, 16 +; RNMI-RV64-FP-NEXT: .cfi_def_cfa_offset 0 +; RNMI-RV64-FP-NEXT: mnret + ret void +} + +declare void @callee() + +define void @test_rnmi_caller() "interrupt"="rnmi" { +; RNMI-RV32-LABEL: test_rnmi_caller: +; RNMI-RV32: # %bb.0: +; RNMI-RV32-NEXT: addi sp, sp, -64 +; RNMI-RV32-NEXT: .cfi_def_cfa_offset 64 +; RNMI-RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw t0, 56(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw t1, 52(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw t2, 48(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw a0, 44(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw a1, 40(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw a2, 36(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw a3, 32(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw a4, 28(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw a5, 24(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw a6, 20(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw a7, 16(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw t3, 12(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw t4, 8(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw t5, 4(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: sw t6, 0(sp) # 4-byte Folded Spill +; RNMI-RV32-NEXT: .cfi_offset ra, -4 +; RNMI-RV32-NEXT: .cfi_offset t0, -8 +; RNMI-RV32-NEXT: .cfi_offset t1, -12 +; RNMI-RV32-NEXT: .cfi_offset t2, -16 +; RNMI-RV32-NEXT: .cfi_offset a0, -20 +; RNMI-RV32-NEXT: .cfi_offset a1, -24 +; RNMI-RV32-NEXT: .cfi_offset a2, -28 +; RNMI-RV32-NEXT: .cfi_offset a3, -32 +; RNMI-RV32-NEXT: .cfi_offset a4, -36 +; RNMI-RV32-NEXT: .cfi_offset a5, -40 +; RNMI-RV32-NEXT: .cfi_offset a6, -44 +; RNMI-RV32-NEXT: .cfi_offset a7, -48 +; RNMI-RV32-NEXT: .cfi_offset t3, -52 +; RNMI-RV32-NEXT: .cfi_offset t4, -56 +; RNMI-RV32-NEXT: .cfi_offset t5, -60 +; RNMI-RV32-NEXT: .cfi_offset t6, -64 +; RNMI-RV32-NEXT: call callee +; RNMI-RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw t0, 56(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw t1, 52(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw t2, 48(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw a0, 44(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw a1, 40(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw a2, 36(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw a3, 32(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw a4, 28(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw a5, 24(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw a6, 20(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw a7, 16(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw t3, 12(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw t4, 8(sp) # 4-byte Folded 
Reload +; RNMI-RV32-NEXT: lw t5, 4(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: lw t6, 0(sp) # 4-byte Folded Reload +; RNMI-RV32-NEXT: .cfi_restore ra +; RNMI-RV32-NEXT: .cfi_restore t0 +; RNMI-RV32-NEXT: .cfi_restore t1 +; RNMI-RV32-NEXT: .cfi_restore t2 +; RNMI-RV32-NEXT: .cfi_restore a0 +; RNMI-RV32-NEXT: .cfi_restore a1 +; RNMI-RV32-NEXT: .cfi_restore a2 +; RNMI-RV32-NEXT: .cfi_restore a3 +; RNMI-RV32-NEXT: .cfi_restore a4 +; RNMI-RV32-NEXT: .cfi_restore a5 +; RNMI-RV32-NEXT: .cfi_restore a6 +; RNMI-RV32-NEXT: .cfi_restore a7 +; RNMI-RV32-NEXT: .cfi_restore t3 +; RNMI-RV32-NEXT: .cfi_restore t4 +; RNMI-RV32-NEXT: .cfi_restore t5 +; RNMI-RV32-NEXT: .cfi_restore t6 +; RNMI-RV32-NEXT: addi sp, sp, 64 +; RNMI-RV32-NEXT: .cfi_def_cfa_offset 0 +; RNMI-RV32-NEXT: mnret +; +; RNMI-RV32-FP-LABEL: test_rnmi_caller: +; RNMI-RV32-FP: # %bb.0: +; RNMI-RV32-FP-NEXT: addi sp, sp, -80 +; RNMI-RV32-FP-NEXT: .cfi_def_cfa_offset 80 +; RNMI-RV32-FP-NEXT: sw ra, 76(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw t0, 72(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw t1, 68(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw t2, 64(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw s0, 60(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw a0, 56(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw a1, 52(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw a2, 48(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw a3, 44(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw a4, 40(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw a5, 36(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw a6, 32(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw a7, 28(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw t3, 24(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw t4, 20(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw t5, 16(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: sw t6, 12(sp) # 4-byte Folded Spill +; RNMI-RV32-FP-NEXT: .cfi_offset ra, -4 +; RNMI-RV32-FP-NEXT: .cfi_offset t0, -8 +; RNMI-RV32-FP-NEXT: .cfi_offset t1, -12 +; RNMI-RV32-FP-NEXT: .cfi_offset t2, -16 +; RNMI-RV32-FP-NEXT: .cfi_offset s0, -20 +; RNMI-RV32-FP-NEXT: .cfi_offset a0, -24 +; RNMI-RV32-FP-NEXT: .cfi_offset a1, -28 +; RNMI-RV32-FP-NEXT: .cfi_offset a2, -32 +; RNMI-RV32-FP-NEXT: .cfi_offset a3, -36 +; RNMI-RV32-FP-NEXT: .cfi_offset a4, -40 +; RNMI-RV32-FP-NEXT: .cfi_offset a5, -44 +; RNMI-RV32-FP-NEXT: .cfi_offset a6, -48 +; RNMI-RV32-FP-NEXT: .cfi_offset a7, -52 +; RNMI-RV32-FP-NEXT: .cfi_offset t3, -56 +; RNMI-RV32-FP-NEXT: .cfi_offset t4, -60 +; RNMI-RV32-FP-NEXT: .cfi_offset t5, -64 +; RNMI-RV32-FP-NEXT: .cfi_offset t6, -68 +; RNMI-RV32-FP-NEXT: addi s0, sp, 80 +; RNMI-RV32-FP-NEXT: .cfi_def_cfa s0, 0 +; RNMI-RV32-FP-NEXT: call callee +; RNMI-RV32-FP-NEXT: .cfi_def_cfa sp, 80 +; RNMI-RV32-FP-NEXT: lw ra, 76(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw t0, 72(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw t1, 68(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw t2, 64(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw s0, 60(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw a0, 56(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw a1, 52(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw a2, 48(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw a3, 44(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw a4, 40(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw a5, 36(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw a6, 32(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw a7, 28(sp) # 4-byte Folded Reload +; 
RNMI-RV32-FP-NEXT: lw t3, 24(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw t4, 20(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw t5, 16(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: lw t6, 12(sp) # 4-byte Folded Reload +; RNMI-RV32-FP-NEXT: .cfi_restore ra +; RNMI-RV32-FP-NEXT: .cfi_restore t0 +; RNMI-RV32-FP-NEXT: .cfi_restore t1 +; RNMI-RV32-FP-NEXT: .cfi_restore t2 +; RNMI-RV32-FP-NEXT: .cfi_restore s0 +; RNMI-RV32-FP-NEXT: .cfi_restore a0 +; RNMI-RV32-FP-NEXT: .cfi_restore a1 +; RNMI-RV32-FP-NEXT: .cfi_restore a2 +; RNMI-RV32-FP-NEXT: .cfi_restore a3 +; RNMI-RV32-FP-NEXT: .cfi_restore a4 +; RNMI-RV32-FP-NEXT: .cfi_restore a5 +; RNMI-RV32-FP-NEXT: .cfi_restore a6 +; RNMI-RV32-FP-NEXT: .cfi_restore a7 +; RNMI-RV32-FP-NEXT: .cfi_restore t3 +; RNMI-RV32-FP-NEXT: .cfi_restore t4 +; RNMI-RV32-FP-NEXT: .cfi_restore t5 +; RNMI-RV32-FP-NEXT: .cfi_restore t6 +; RNMI-RV32-FP-NEXT: addi sp, sp, 80 +; RNMI-RV32-FP-NEXT: .cfi_def_cfa_offset 0 +; RNMI-RV32-FP-NEXT: mnret +; +; RNMI-RV64-LABEL: test_rnmi_caller: +; RNMI-RV64: # %bb.0: +; RNMI-RV64-NEXT: addi sp, sp, -128 +; RNMI-RV64-NEXT: .cfi_def_cfa_offset 128 +; RNMI-RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd t0, 112(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd t1, 104(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd t2, 96(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd a0, 88(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd a1, 80(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd a2, 72(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd a3, 64(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd a4, 56(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd a5, 48(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd a6, 40(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd a7, 32(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd t3, 24(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd t4, 16(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd t5, 8(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: sd t6, 0(sp) # 8-byte Folded Spill +; RNMI-RV64-NEXT: .cfi_offset ra, -8 +; RNMI-RV64-NEXT: .cfi_offset t0, -16 +; RNMI-RV64-NEXT: .cfi_offset t1, -24 +; RNMI-RV64-NEXT: .cfi_offset t2, -32 +; RNMI-RV64-NEXT: .cfi_offset a0, -40 +; RNMI-RV64-NEXT: .cfi_offset a1, -48 +; RNMI-RV64-NEXT: .cfi_offset a2, -56 +; RNMI-RV64-NEXT: .cfi_offset a3, -64 +; RNMI-RV64-NEXT: .cfi_offset a4, -72 +; RNMI-RV64-NEXT: .cfi_offset a5, -80 +; RNMI-RV64-NEXT: .cfi_offset a6, -88 +; RNMI-RV64-NEXT: .cfi_offset a7, -96 +; RNMI-RV64-NEXT: .cfi_offset t3, -104 +; RNMI-RV64-NEXT: .cfi_offset t4, -112 +; RNMI-RV64-NEXT: .cfi_offset t5, -120 +; RNMI-RV64-NEXT: .cfi_offset t6, -128 +; RNMI-RV64-NEXT: call callee +; RNMI-RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld t0, 112(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld t1, 104(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld t2, 96(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld a0, 88(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld a1, 80(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld a2, 72(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld a3, 64(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld a4, 56(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld a5, 48(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld a6, 40(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld a7, 32(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld t3, 24(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld t4, 16(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld t5, 8(sp) # 8-byte Folded Reload +; RNMI-RV64-NEXT: ld t6, 0(sp) # 8-byte 
Folded Reload +; RNMI-RV64-NEXT: .cfi_restore ra +; RNMI-RV64-NEXT: .cfi_restore t0 +; RNMI-RV64-NEXT: .cfi_restore t1 +; RNMI-RV64-NEXT: .cfi_restore t2 +; RNMI-RV64-NEXT: .cfi_restore a0 +; RNMI-RV64-NEXT: .cfi_restore a1 +; RNMI-RV64-NEXT: .cfi_restore a2 +; RNMI-RV64-NEXT: .cfi_restore a3 +; RNMI-RV64-NEXT: .cfi_restore a4 +; RNMI-RV64-NEXT: .cfi_restore a5 +; RNMI-RV64-NEXT: .cfi_restore a6 +; RNMI-RV64-NEXT: .cfi_restore a7 +; RNMI-RV64-NEXT: .cfi_restore t3 +; RNMI-RV64-NEXT: .cfi_restore t4 +; RNMI-RV64-NEXT: .cfi_restore t5 +; RNMI-RV64-NEXT: .cfi_restore t6 +; RNMI-RV64-NEXT: addi sp, sp, 128 +; RNMI-RV64-NEXT: .cfi_def_cfa_offset 0 +; RNMI-RV64-NEXT: mnret +; +; RNMI-RV64-FP-LABEL: test_rnmi_caller: +; RNMI-RV64-FP: # %bb.0: +; RNMI-RV64-FP-NEXT: addi sp, sp, -144 +; RNMI-RV64-FP-NEXT: .cfi_def_cfa_offset 144 +; RNMI-RV64-FP-NEXT: sd ra, 136(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd t0, 128(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd t1, 120(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd t2, 112(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd s0, 104(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill +; RNMI-RV64-FP-NEXT: .cfi_offset ra, -8 +; RNMI-RV64-FP-NEXT: .cfi_offset t0, -16 +; RNMI-RV64-FP-NEXT: .cfi_offset t1, -24 +; RNMI-RV64-FP-NEXT: .cfi_offset t2, -32 +; RNMI-RV64-FP-NEXT: .cfi_offset s0, -40 +; RNMI-RV64-FP-NEXT: .cfi_offset a0, -48 +; RNMI-RV64-FP-NEXT: .cfi_offset a1, -56 +; RNMI-RV64-FP-NEXT: .cfi_offset a2, -64 +; RNMI-RV64-FP-NEXT: .cfi_offset a3, -72 +; RNMI-RV64-FP-NEXT: .cfi_offset a4, -80 +; RNMI-RV64-FP-NEXT: .cfi_offset a5, -88 +; RNMI-RV64-FP-NEXT: .cfi_offset a6, -96 +; RNMI-RV64-FP-NEXT: .cfi_offset a7, -104 +; RNMI-RV64-FP-NEXT: .cfi_offset t3, -112 +; RNMI-RV64-FP-NEXT: .cfi_offset t4, -120 +; RNMI-RV64-FP-NEXT: .cfi_offset t5, -128 +; RNMI-RV64-FP-NEXT: .cfi_offset t6, -136 +; RNMI-RV64-FP-NEXT: addi s0, sp, 144 +; RNMI-RV64-FP-NEXT: .cfi_def_cfa s0, 0 +; RNMI-RV64-FP-NEXT: call callee +; RNMI-RV64-FP-NEXT: .cfi_def_cfa sp, 144 +; RNMI-RV64-FP-NEXT: ld ra, 136(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld t0, 128(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld t1, 120(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld t2, 112(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld s0, 104(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld 
t4, 24(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload +; RNMI-RV64-FP-NEXT: .cfi_restore ra +; RNMI-RV64-FP-NEXT: .cfi_restore t0 +; RNMI-RV64-FP-NEXT: .cfi_restore t1 +; RNMI-RV64-FP-NEXT: .cfi_restore t2 +; RNMI-RV64-FP-NEXT: .cfi_restore s0 +; RNMI-RV64-FP-NEXT: .cfi_restore a0 +; RNMI-RV64-FP-NEXT: .cfi_restore a1 +; RNMI-RV64-FP-NEXT: .cfi_restore a2 +; RNMI-RV64-FP-NEXT: .cfi_restore a3 +; RNMI-RV64-FP-NEXT: .cfi_restore a4 +; RNMI-RV64-FP-NEXT: .cfi_restore a5 +; RNMI-RV64-FP-NEXT: .cfi_restore a6 +; RNMI-RV64-FP-NEXT: .cfi_restore a7 +; RNMI-RV64-FP-NEXT: .cfi_restore t3 +; RNMI-RV64-FP-NEXT: .cfi_restore t4 +; RNMI-RV64-FP-NEXT: .cfi_restore t5 +; RNMI-RV64-FP-NEXT: .cfi_restore t6 +; RNMI-RV64-FP-NEXT: addi sp, sp, 144 +; RNMI-RV64-FP-NEXT: .cfi_def_cfa_offset 0 +; RNMI-RV64-FP-NEXT: mnret + call void @callee() + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll index 8dd6301..eb8b769 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll @@ -1587,59 +1587,59 @@ define i64 @sub_if_uge_i64(i64 %x, i64 %y) { define i128 @sub_if_uge_i128(i128 %x, i128 %y) { ; CHECK-LABEL: sub_if_uge_i128: ; CHECK: # %bb.0: -; CHECK-NEXT: lw a7, 4(a2) -; CHECK-NEXT: lw a6, 8(a2) -; CHECK-NEXT: lw t0, 12(a2) ; CHECK-NEXT: lw a3, 4(a1) -; CHECK-NEXT: lw a4, 12(a1) -; CHECK-NEXT: lw a5, 8(a1) -; CHECK-NEXT: beq a4, t0, .LBB53_2 +; CHECK-NEXT: lw a4, 8(a1) +; CHECK-NEXT: lw a5, 12(a1) +; CHECK-NEXT: lw a6, 4(a2) +; CHECK-NEXT: lw t0, 12(a2) +; CHECK-NEXT: lw a7, 8(a2) +; CHECK-NEXT: beq a5, t0, .LBB53_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: sltu t1, a4, t0 +; CHECK-NEXT: sltu t1, a5, t0 ; CHECK-NEXT: j .LBB53_3 ; CHECK-NEXT: .LBB53_2: -; CHECK-NEXT: sltu t1, a5, a6 +; CHECK-NEXT: sltu t1, a4, a7 ; CHECK-NEXT: .LBB53_3: -; CHECK-NEXT: lw a2, 0(a2) ; CHECK-NEXT: lw a1, 0(a1) -; CHECK-NEXT: beq a3, a7, .LBB53_5 +; CHECK-NEXT: lw a2, 0(a2) +; CHECK-NEXT: beq a3, a6, .LBB53_5 ; CHECK-NEXT: # %bb.4: -; CHECK-NEXT: sltu t2, a3, a7 +; CHECK-NEXT: sltu t2, a3, a6 ; CHECK-NEXT: j .LBB53_6 ; CHECK-NEXT: .LBB53_5: ; CHECK-NEXT: sltu t2, a1, a2 ; CHECK-NEXT: .LBB53_6: -; CHECK-NEXT: xor t3, a4, t0 -; CHECK-NEXT: xor t4, a5, a6 +; CHECK-NEXT: xor t3, a5, t0 +; CHECK-NEXT: xor t4, a4, a7 ; CHECK-NEXT: or t3, t4, t3 ; CHECK-NEXT: beqz t3, .LBB53_8 ; CHECK-NEXT: # %bb.7: ; CHECK-NEXT: mv t2, t1 ; CHECK-NEXT: .LBB53_8: -; CHECK-NEXT: addi t2, t2, -1 -; CHECK-NEXT: and t1, t2, t0 -; CHECK-NEXT: and t0, t2, a2 -; CHECK-NEXT: and a7, t2, a7 +; CHECK-NEXT: addi t3, t2, -1 +; CHECK-NEXT: and t2, t3, t0 +; CHECK-NEXT: and t0, t3, a2 +; CHECK-NEXT: and t1, t3, a6 ; CHECK-NEXT: sltu a2, a1, t0 -; CHECK-NEXT: and t2, t2, a6 +; CHECK-NEXT: and a7, t3, a7 ; CHECK-NEXT: mv a6, a2 -; CHECK-NEXT: beq a3, a7, .LBB53_10 +; CHECK-NEXT: beq a3, t1, .LBB53_10 ; CHECK-NEXT: # %bb.9: -; CHECK-NEXT: sltu a6, a3, a7 +; CHECK-NEXT: sltu a6, a3, t1 ; CHECK-NEXT: .LBB53_10: -; CHECK-NEXT: sub t3, a5, t2 -; CHECK-NEXT: sltu a5, a5, t2 -; CHECK-NEXT: sub a4, a4, t1 -; CHECK-NEXT: sub a3, a3, a7 +; CHECK-NEXT: sub t3, a4, a7 +; CHECK-NEXT: sltu a4, a4, a7 +; CHECK-NEXT: sub a5, a5, t2 +; CHECK-NEXT: sub a3, a3, t1 ; CHECK-NEXT: sub a1, a1, t0 ; CHECK-NEXT: sltu a7, t3, a6 -; CHECK-NEXT: sub a4, a4, a5 -; CHECK-NEXT: sub a5, t3, a6 +; CHECK-NEXT: sub a5, a5, a4 +; CHECK-NEXT: sub a4, t3, a6 ; CHECK-NEXT: sub a3, a3, a2 -; CHECK-NEXT: sub a2, a4, a7 +; CHECK-NEXT: sub a2, a5, 
a7 ; CHECK-NEXT: sw a1, 0(a0) ; CHECK-NEXT: sw a3, 4(a0) -; CHECK-NEXT: sw a5, 8(a0) +; CHECK-NEXT: sw a4, 8(a0) ; CHECK-NEXT: sw a2, 12(a0) ; CHECK-NEXT: ret %cmp = icmp ult i128 %x, %y diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll index 4aa6dd4..7ebbd78 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll @@ -319,3 +319,83 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind { %1 = zext i16 %a to i64 ret i64 %1 } + +define i32 @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3) nounwind { +; RV32I-LABEL: pack_lo_packh_hi_packh: +; RV32I: # %bb.0: +; RV32I-NEXT: slli a1, a1, 8 +; RV32I-NEXT: slli a2, a2, 16 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: or a2, a2, a3 +; RV32I-NEXT: or a0, a0, a2 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_packh_hi_packh: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a0, a0, a1 +; RV32ZBKB-NEXT: packh a1, a2, a3 +; RV32ZBKB-NEXT: pack a0, a0, a1 +; RV32ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %h, %f + %j = or i32 %i, %g + ret i32 %j +} + +define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2) nounwind { +; RV32I-LABEL: pack_lo_zext_hi_packh: +; RV32I: # %bb.0: +; RV32I-NEXT: slli a1, a2, 16 +; RV32I-NEXT: slli a2, a2, 24 +; RV32I-NEXT: or a1, a2, a1 +; RV32I-NEXT: or a0, a1, a0 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_zext_hi_packh: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a1, a2, a2 +; RV32ZBKB-NEXT: pack a0, a0, a1 +; RV32ZBKB-NEXT: ret + %a = zext i16 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %c, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + ret i32 %g +} + +; Negative test, %a isn't extended so we can't use pack for the outer or, but +; we can use packh for the high half. 
+define i32 @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2) nounwind { +; RV32I-LABEL: pack_lo_noext_hi_packh: +; RV32I: # %bb.0: +; RV32I-NEXT: slli a1, a2, 16 +; RV32I-NEXT: slli a2, a2, 24 +; RV32I-NEXT: or a1, a2, a1 +; RV32I-NEXT: or a0, a1, a0 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_noext_hi_packh: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a1, a2, a2 +; RV32ZBKB-NEXT: slli a1, a1, 16 +; RV32ZBKB-NEXT: or a0, a1, a0 +; RV32ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %c, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + ret i32 %g +} diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll index 1a3beeb7..e3728bf 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbs.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll @@ -785,16 +785,16 @@ define i32 @bset_trailing_ones_i32_no_mask(i32 %a) nounwind { define i64 @bset_trailing_ones_i64_mask(i64 %a) nounwind { ; CHECK-LABEL: bset_trailing_ones_i64_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: li a2, -1 -; CHECK-NEXT: andi a3, a0, 63 -; CHECK-NEXT: addi a1, a3, -32 -; CHECK-NEXT: sll a0, a2, a0 +; CHECK-NEXT: andi a2, a0, 63 +; CHECK-NEXT: li a3, -1 +; CHECK-NEXT: addi a1, a2, -32 +; CHECK-NEXT: sll a0, a3, a0 ; CHECK-NEXT: bltz a1, .LBB43_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: sll a2, a2, a3 +; CHECK-NEXT: sll a2, a3, a2 ; CHECK-NEXT: j .LBB43_3 ; CHECK-NEXT: .LBB43_2: -; CHECK-NEXT: not a2, a3 +; CHECK-NEXT: not a2, a2 ; CHECK-NEXT: lui a3, 524288 ; CHECK-NEXT: addi a3, a3, -1 ; CHECK-NEXT: srl a2, a3, a2 diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll index 818ea72..37c9eae 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll @@ -392,3 +392,125 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind { %1 = zext i16 %a to i64 ret i64 %1 } + +define void @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: slli a3, a3, 24 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a2, a2, a3 +; RV64I-NEXT: or a0, a0, a2 +; RV64I-NEXT: sw a0, 0(a4) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a2, a3 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a4) +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %h, %f + %j = or i32 %i, %g + store i32 %j, ptr %p + ret void +} + +define void @pack_lo_packh_hi_packh_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh_2: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: slli a3, a3, 24 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a2, a2, a3 +; RV64I-NEXT: or a0, a2, a0 +; RV64I-NEXT: sw a0, 0(a4) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_2: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a3, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a4) +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = 
shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %g, %h + %j = or i32 %f, %i + store i32 %j, ptr %p + ret void +} + +define void @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_zext_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a2, 16 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: sw a0, 0(a3) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_zext_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a2, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a3) +; RV64ZBKB-NEXT: ret + %a = zext i16 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %c, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + store i32 %g, ptr %p + ret void +} + +; Negative test, %a isn't extended so we can't use packw for the outer or, but +; we can use packh for the high half. +define void @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_noext_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a2, 16 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: sw a0, 0(a3) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_noext_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a2, a2 +; RV64ZBKB-NEXT: slli a1, a1, 16 +; RV64ZBKB-NEXT: or a0, a1, a0 +; RV64ZBKB-NEXT: sw a0, 0(a3) +; RV64ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %c, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + store i32 %g, ptr %p + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll new file mode 100644 index 0000000..5b01976 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll @@ -0,0 +1,586 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define { <2 x i8>, i32 } @vploadff_v2i8(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i8>, i32 } @llvm.vp.load.ff.v2i8.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x i8>, i32 } %load +} + +define { <2 x i8>, i32 } @vploadff_v2i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i8>, i32 } @llvm.vp.load.ff.v2i8.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x i8>, i32 } %load +} + +define { <4 x i8>, i32 } @vploadff_v4i8(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i8>, 
i32 } @llvm.vp.load.ff.v4i8.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x i8>, i32 } %load +} + +define { <4 x i8>, i32 } @vploadff_v4i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i8>, i32 } @llvm.vp.load.ff.v4i8.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x i8>, i32 } %load +} + +define { <8 x i8>, i32 } @vploadff_v8i8(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i8>, i32 } @llvm.vp.load.ff.v8i8.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x i8>, i32 } %load +} + +define { <8 x i8>, i32 } @vploadff_v8i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i8>, i32 } @llvm.vp.load.ff.v8i8.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x i8>, i32 } %load +} + +define { <2 x i16>, i32 } @vploadff_v2i16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i16>, i32 } @llvm.vp.load.ff.v2i16.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x i16>, i32 } %load +} + +define { <2 x i16>, i32 } @vploadff_v2i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i16>, i32 } @llvm.vp.load.ff.v2i16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x i16>, i32 } %load +} + +define { <4 x i16>, i32 } @vploadff_v4i16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i16>, i32 } @llvm.vp.load.ff.v4i16.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x i16>, i32 } %load +} + +define { <4 x i16>, i32 } @vploadff_v4i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i16>, i32 } @llvm.vp.load.ff.v4i16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x i16>, i32 } %load +} + +define { <8 x i16>, i32 } @vploadff_v8i16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i16>, i32 } @llvm.vp.load.ff.v8i16.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x i16>, i32 } %load +} + +define { <8 x i16>, i32 } @vploadff_v8i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i16>, i32 } @llvm.vp.load.ff.v8i16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x i16>, i32 } %load +} + +define { <2 x i32>, i32 } @vploadff_v2i32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i32>, i32 } @llvm.vp.load.ff.v2i32.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x i32>, i32 } %load +} + +define { <2 x i32>, i32 } @vploadff_v2i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i32>, i32 } @llvm.vp.load.ff.v2i32.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x i32>, i32 } %load +} + +define { <4 x i32>, i32 } @vploadff_v4i32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i32>, i32 } @llvm.vp.load.ff.v4i32.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x i32>, i32 } %load +} + +define { <4 x i32>, i32 } @vploadff_v4i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i32>, i32 } @llvm.vp.load.ff.v4i32.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x i32>, i32 } %load +} + +define { <8 x i32>, i32 } @vploadff_v8i32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i32>, i32 } @llvm.vp.load.ff.v8i32.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x i32>, i32 } %load +} + +define { <8 x i32>, i32 } @vploadff_v8i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i32>, i32 } @llvm.vp.load.ff.v8i32.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x i32>, i32 } %load +} + +define { <2 x i64>, i32 } @vploadff_v2i64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i64>, i32 } @llvm.vp.load.ff.v2i64.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x i64>, i32 } %load +} + +define { <2 x i64>, i32 } @vploadff_v2i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i64>, i32 } @llvm.vp.load.ff.v2i64.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x i64>, i32 } %load +} + +define { <4 x i64>, i32 } 
@vploadff_v4i64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i64>, i32 } @llvm.vp.load.ff.v4i64.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x i64>, i32 } %load +} + +define { <4 x i64>, i32 } @vploadff_v4i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i64>, i32 } @llvm.vp.load.ff.v4i64.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x i64>, i32 } %load +} + +define { <8 x i64>, i32 } @vploadff_v8i64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i64>, i32 } @llvm.vp.load.ff.v8i64.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x i64>, i32 } %load +} + +define { <8 x i64>, i32 } @vploadff_v8i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i64>, i32 } @llvm.vp.load.ff.v8i64.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x i64>, i32 } %load +} + +define { <32 x i64>, i32 } @vploadff_v32i64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v32i64: +; CHECK: # %bb.0: +; CHECK-NEXT: li a3, 16 +; CHECK-NEXT: bltu a2, a3, .LBB24_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: .LBB24_2: +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a1), v0.t +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: sw a1, 256(a0) +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: ret + %load = call { <32 x i64>, i32 } @llvm.vp.load.ff.v32i64.p0(ptr %ptr, <32 x i1> %m, i32 %evl) + ret { <32 x i64>, i32 } %load +} + +define { <32 x i64>, i32 } @vploadff_v32i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v32i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: li a3, 16 +; CHECK-NEXT: bltu a2, a3, .LBB25_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a1) +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: sw a1, 256(a0) +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: ret + %load = call { <32 x i64>, i32 } @llvm.vp.load.ff.v32i64.p0(ptr %ptr, <32 x i1> splat (i1 true), i32 %evl) + ret { <32 x i64>, i32 } %load +} + +define { <2 x half>, i32 } @vploadff_v2f16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x half>, i32 } @llvm.vp.load.ff.v2f16.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x half>, i32 } %load +} + +define { <2 x half>, i32 } @vploadff_v2f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, 
ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x half>, i32 } @llvm.vp.load.ff.v2f16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x half>, i32 } %load +} + +define { <4 x half>, i32 } @vploadff_v4f16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x half>, i32 } @llvm.vp.load.ff.v4f16.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x half>, i32 } %load +} + +define { <4 x half>, i32 } @vploadff_v4f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x half>, i32 } @llvm.vp.load.ff.v4f16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x half>, i32 } %load +} + +define { <8 x half>, i32 } @vploadff_v8f16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x half>, i32 } @llvm.vp.load.ff.v8f16.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x half>, i32 } %load +} + +define { <8 x half>, i32 } @vploadff_v8f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x half>, i32 } @llvm.vp.load.ff.v8f16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x half>, i32 } %load +} + +define { <2 x float>, i32 } @vploadff_v2f32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x float>, i32 } @llvm.vp.load.ff.v2f32.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x float>, i32 } %load +} + +define { <2 x float>, i32 } @vploadff_v2f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x float>, i32 } @llvm.vp.load.ff.v2f32.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x float>, i32 } %load +} + +define { <4 x float>, i32 } @vploadff_v4f32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x float>, i32 } @llvm.vp.load.ff.v4f32.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x float>, i32 } %load +} + +define { <4 x float>, i32 } @vploadff_v4f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x float>, i32 } @llvm.vp.load.ff.v4f32.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x float>, i32 } %load +} + 
+define { <8 x float>, i32 } @vploadff_v8f32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x float>, i32 } @llvm.vp.load.ff.v8f32.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x float>, i32 } %load +} + +define { <8 x float>, i32 } @vploadff_v8f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x float>, i32 } @llvm.vp.load.ff.v8f32.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x float>, i32 } %load +} + +define { <2 x double>, i32 } @vploadff_v2f64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x double>, i32 } @llvm.vp.load.ff.v2f64.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x double>, i32 } %load +} + +define { <2 x double>, i32 } @vploadff_v2f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x double>, i32 } @llvm.vp.load.ff.v2f64.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x double>, i32 } %load +} + +define { <4 x double>, i32 } @vploadff_v4f64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x double>, i32 } @llvm.vp.load.ff.v4f64.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x double>, i32 } %load +} + +define { <4 x double>, i32 } @vploadff_v4f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x double>, i32 } @llvm.vp.load.ff.v4f64.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x double>, i32 } %load +} + +define { <8 x double>, i32 } @vploadff_v8f64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x double>, i32 } @llvm.vp.load.ff.v8f64.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x double>, i32 } %load +} + +define { <8 x double>, i32 } @vploadff_v8f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x double>, i32 } @llvm.vp.load.ff.v8f64.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x double>, i32 } %load +} + +define { <2 x bfloat>, i32 } @vploadff_v2bf16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; 
CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x bfloat>, i32 } @llvm.vp.load.ff.v2bf16.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x bfloat>, i32 } %load +} + +define { <2 x bfloat>, i32 } @vploadff_v2bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x bfloat>, i32 } @llvm.vp.load.ff.v2bf16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x bfloat>, i32 } %load +} + +define { <4 x bfloat>, i32 } @vploadff_v4bf16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x bfloat>, i32 } @llvm.vp.load.ff.v4bf16.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x bfloat>, i32 } %load +} + +define { <4 x bfloat>, i32 } @vploadff_v4bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x bfloat>, i32 } @llvm.vp.load.ff.v4bf16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x bfloat>, i32 } %load +} + +define { <8 x bfloat>, i32 } @vploadff_v8bf16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x bfloat>, i32 } @llvm.vp.load.ff.v8bf16.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x bfloat>, i32 } %load +} + +define { <8 x bfloat>, i32 } @vploadff_v8bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x bfloat>, i32 } @llvm.vp.load.ff.v8bf16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x bfloat>, i32 } %load +} + +define { <7 x i8>, i32 } @vploadff_v7i8(ptr %ptr, <7 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v7i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <7 x i8>, i32 } @llvm.vp.load.ff.v7i8.p0(ptr %ptr, <7 x i1> %m, i32 %evl) + ret { <7 x i8>, i32 } %load +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fp4-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fp4-bitcast.ll new file mode 100644 index 0000000..ac3cd84 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fp4-bitcast.ll @@ -0,0 +1,33 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=riscv64 -mattr='+v' < %s | FileCheck %s + +define <2 x i8> @fp4(<4 x i4> %0) nounwind { +; CHECK-LABEL: fp4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: vslidedown.vi v9, v8, 1 +; CHECK-NEXT: vmv.x.s a1, v9 +; CHECK-NEXT: vslidedown.vi v9, v8, 2 +; CHECK-NEXT: vslidedown.vi v8, v8, 3 +; CHECK-NEXT: andi a0, a0, 15 +; CHECK-NEXT: vmv.x.s a2, v9 +; CHECK-NEXT: andi 
a1, a1, 15 +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: or a0, a0, a1 +; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: andi a2, a2, 15 +; CHECK-NEXT: slli a1, a1, 12 +; CHECK-NEXT: slli a2, a2, 8 +; CHECK-NEXT: or a1, a2, a1 +; CHECK-NEXT: or a0, a0, a1 +; CHECK-NEXT: sh a0, 14(sp) +; CHECK-NEXT: addi a0, sp, 14 +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %2 = bitcast <4 x i4> %0 to <2 x i8> + ret <2 x i8> %2 +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll index f481f9c..9ef7f94 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll @@ -89,17 +89,17 @@ entry: define <2 x i32> @ustest_f64i32(<2 x double> %x) { ; CHECK-NOV-LABEL: ustest_f64i32: ; CHECK-NOV: # %bb.0: # %entry -; CHECK-NOV-NEXT: fcvt.l.d a1, fa1, rtz +; CHECK-NOV-NEXT: fcvt.l.d a0, fa0, rtz ; CHECK-NOV-NEXT: li a2, -1 ; CHECK-NOV-NEXT: srli a2, a2, 32 -; CHECK-NOV-NEXT: fcvt.l.d a0, fa0, rtz -; CHECK-NOV-NEXT: blt a1, a2, .LBB2_2 +; CHECK-NOV-NEXT: fcvt.l.d a1, fa1, rtz +; CHECK-NOV-NEXT: blt a0, a2, .LBB2_2 ; CHECK-NOV-NEXT: # %bb.1: # %entry -; CHECK-NOV-NEXT: mv a1, a2 +; CHECK-NOV-NEXT: mv a0, a2 ; CHECK-NOV-NEXT: .LBB2_2: # %entry -; CHECK-NOV-NEXT: blt a0, a2, .LBB2_4 +; CHECK-NOV-NEXT: blt a1, a2, .LBB2_4 ; CHECK-NOV-NEXT: # %bb.3: # %entry -; CHECK-NOV-NEXT: mv a0, a2 +; CHECK-NOV-NEXT: mv a1, a2 ; CHECK-NOV-NEXT: .LBB2_4: # %entry ; CHECK-NOV-NEXT: sgtz a2, a1 ; CHECK-NOV-NEXT: sgtz a3, a0 @@ -254,50 +254,50 @@ entry: define <4 x i32> @ustest_f32i32(<4 x float> %x) { ; CHECK-NOV-LABEL: ustest_f32i32: ; CHECK-NOV: # %bb.0: # %entry -; CHECK-NOV-NEXT: fcvt.l.s a1, fa3, rtz -; CHECK-NOV-NEXT: li a4, -1 -; CHECK-NOV-NEXT: srli a4, a4, 32 -; CHECK-NOV-NEXT: fcvt.l.s a2, fa2, rtz -; CHECK-NOV-NEXT: bge a1, a4, .LBB5_6 +; CHECK-NOV-NEXT: fcvt.l.s a1, fa0, rtz +; CHECK-NOV-NEXT: li a5, -1 +; CHECK-NOV-NEXT: srli a5, a5, 32 +; CHECK-NOV-NEXT: fcvt.l.s a2, fa1, rtz +; CHECK-NOV-NEXT: bge a1, a5, .LBB5_6 ; CHECK-NOV-NEXT: # %bb.1: # %entry -; CHECK-NOV-NEXT: fcvt.l.s a3, fa1, rtz -; CHECK-NOV-NEXT: bge a2, a4, .LBB5_7 +; CHECK-NOV-NEXT: fcvt.l.s a3, fa2, rtz +; CHECK-NOV-NEXT: bge a2, a5, .LBB5_7 ; CHECK-NOV-NEXT: .LBB5_2: # %entry -; CHECK-NOV-NEXT: fcvt.l.s a5, fa0, rtz -; CHECK-NOV-NEXT: bge a3, a4, .LBB5_8 +; CHECK-NOV-NEXT: fcvt.l.s a4, fa3, rtz +; CHECK-NOV-NEXT: bge a3, a5, .LBB5_8 ; CHECK-NOV-NEXT: .LBB5_3: # %entry -; CHECK-NOV-NEXT: blt a5, a4, .LBB5_5 +; CHECK-NOV-NEXT: blt a4, a5, .LBB5_5 ; CHECK-NOV-NEXT: .LBB5_4: # %entry -; CHECK-NOV-NEXT: mv a5, a4 +; CHECK-NOV-NEXT: mv a4, a5 ; CHECK-NOV-NEXT: .LBB5_5: # %entry -; CHECK-NOV-NEXT: sgtz a4, a1 -; CHECK-NOV-NEXT: sgtz a6, a2 -; CHECK-NOV-NEXT: sgtz a7, a3 -; CHECK-NOV-NEXT: sgtz t0, a5 +; CHECK-NOV-NEXT: sgtz a5, a4 +; CHECK-NOV-NEXT: sgtz a6, a3 +; CHECK-NOV-NEXT: sgtz a7, a2 +; CHECK-NOV-NEXT: sgtz t0, a1 ; CHECK-NOV-NEXT: neg t0, t0 ; CHECK-NOV-NEXT: neg a7, a7 ; CHECK-NOV-NEXT: neg a6, a6 -; CHECK-NOV-NEXT: neg a4, a4 -; CHECK-NOV-NEXT: and a5, t0, a5 -; CHECK-NOV-NEXT: and a3, a7, a3 -; CHECK-NOV-NEXT: and a2, a6, a2 -; CHECK-NOV-NEXT: and a1, a4, a1 -; CHECK-NOV-NEXT: sw a5, 0(a0) -; CHECK-NOV-NEXT: sw a3, 4(a0) -; CHECK-NOV-NEXT: sw a2, 8(a0) -; CHECK-NOV-NEXT: sw a1, 12(a0) +; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: and a1, t0, a1 +; CHECK-NOV-NEXT: and a2, a7, a2 +; CHECK-NOV-NEXT: and a3, a6, a3 +; CHECK-NOV-NEXT: and a4, a5, a4 +; 
CHECK-NOV-NEXT: sw a1, 0(a0) +; CHECK-NOV-NEXT: sw a2, 4(a0) +; CHECK-NOV-NEXT: sw a3, 8(a0) +; CHECK-NOV-NEXT: sw a4, 12(a0) ; CHECK-NOV-NEXT: ret ; CHECK-NOV-NEXT: .LBB5_6: # %entry -; CHECK-NOV-NEXT: mv a1, a4 -; CHECK-NOV-NEXT: fcvt.l.s a3, fa1, rtz -; CHECK-NOV-NEXT: blt a2, a4, .LBB5_2 +; CHECK-NOV-NEXT: mv a1, a5 +; CHECK-NOV-NEXT: fcvt.l.s a3, fa2, rtz +; CHECK-NOV-NEXT: blt a2, a5, .LBB5_2 ; CHECK-NOV-NEXT: .LBB5_7: # %entry -; CHECK-NOV-NEXT: mv a2, a4 -; CHECK-NOV-NEXT: fcvt.l.s a5, fa0, rtz -; CHECK-NOV-NEXT: blt a3, a4, .LBB5_3 +; CHECK-NOV-NEXT: mv a2, a5 +; CHECK-NOV-NEXT: fcvt.l.s a4, fa3, rtz +; CHECK-NOV-NEXT: blt a3, a5, .LBB5_3 ; CHECK-NOV-NEXT: .LBB5_8: # %entry -; CHECK-NOV-NEXT: mv a3, a4 -; CHECK-NOV-NEXT: bge a5, a4, .LBB5_4 +; CHECK-NOV-NEXT: mv a3, a5 +; CHECK-NOV-NEXT: bge a4, a5, .LBB5_4 ; CHECK-NOV-NEXT: j .LBB5_5 ; ; CHECK-V-LABEL: ustest_f32i32: @@ -720,8 +720,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) { ; CHECK-NOV-NEXT: .cfi_offset fs2, -64 ; CHECK-NOV-NEXT: .cfi_remember_state ; CHECK-NOV-NEXT: lhu s1, 0(a1) -; CHECK-NOV-NEXT: lhu s2, 8(a1) -; CHECK-NOV-NEXT: lhu a2, 16(a1) +; CHECK-NOV-NEXT: lhu a2, 8(a1) +; CHECK-NOV-NEXT: lhu s2, 16(a1) ; CHECK-NOV-NEXT: lhu s3, 24(a1) ; CHECK-NOV-NEXT: mv s0, a0 ; CHECK-NOV-NEXT: fmv.w.x fa0, a2 @@ -730,43 +730,43 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) { ; CHECK-NOV-NEXT: fmv.w.x fa0, s2 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs1, fa0 -; CHECK-NOV-NEXT: fmv.w.x fa0, s1 +; CHECK-NOV-NEXT: fmv.w.x fa0, s3 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs0, fa0 -; CHECK-NOV-NEXT: fmv.w.x fa0, s3 +; CHECK-NOV-NEXT: fmv.w.x fa0, s1 ; CHECK-NOV-NEXT: fcvt.l.s s1, fs2, rtz ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz -; CHECK-NOV-NEXT: li a2, -1 -; CHECK-NOV-NEXT: srli a2, a2, 32 -; CHECK-NOV-NEXT: bge a0, a2, .LBB8_6 +; CHECK-NOV-NEXT: li a3, -1 +; CHECK-NOV-NEXT: srli a3, a3, 32 +; CHECK-NOV-NEXT: bge a0, a3, .LBB8_6 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a1, fs1, rtz -; CHECK-NOV-NEXT: bge s1, a2, .LBB8_7 +; CHECK-NOV-NEXT: bge s1, a3, .LBB8_7 ; CHECK-NOV-NEXT: .LBB8_2: # %entry -; CHECK-NOV-NEXT: fcvt.l.s a3, fs0, rtz -; CHECK-NOV-NEXT: bge a1, a2, .LBB8_8 +; CHECK-NOV-NEXT: fcvt.l.s a2, fs0, rtz +; CHECK-NOV-NEXT: bge a1, a3, .LBB8_8 ; CHECK-NOV-NEXT: .LBB8_3: # %entry -; CHECK-NOV-NEXT: blt a3, a2, .LBB8_5 +; CHECK-NOV-NEXT: blt a2, a3, .LBB8_5 ; CHECK-NOV-NEXT: .LBB8_4: # %entry -; CHECK-NOV-NEXT: mv a3, a2 +; CHECK-NOV-NEXT: mv a2, a3 ; CHECK-NOV-NEXT: .LBB8_5: # %entry -; CHECK-NOV-NEXT: sgtz a2, a0 -; CHECK-NOV-NEXT: sgtz a4, s1 -; CHECK-NOV-NEXT: sgtz a5, a1 -; CHECK-NOV-NEXT: sgtz a6, a3 +; CHECK-NOV-NEXT: sgtz a3, a2 +; CHECK-NOV-NEXT: sgtz a4, a1 +; CHECK-NOV-NEXT: sgtz a5, s1 +; CHECK-NOV-NEXT: sgtz a6, a0 ; CHECK-NOV-NEXT: neg a6, a6 ; CHECK-NOV-NEXT: neg a5, a5 ; CHECK-NOV-NEXT: neg a4, a4 -; CHECK-NOV-NEXT: neg a2, a2 -; CHECK-NOV-NEXT: and a3, a6, a3 -; CHECK-NOV-NEXT: and a1, a5, a1 -; CHECK-NOV-NEXT: and a4, a4, s1 -; CHECK-NOV-NEXT: and a0, a2, a0 -; CHECK-NOV-NEXT: sw a3, 0(s0) -; CHECK-NOV-NEXT: sw a1, 4(s0) -; CHECK-NOV-NEXT: sw a4, 8(s0) -; CHECK-NOV-NEXT: sw a0, 12(s0) +; CHECK-NOV-NEXT: neg a3, a3 +; CHECK-NOV-NEXT: and a0, a6, a0 +; CHECK-NOV-NEXT: and a5, a5, s1 +; CHECK-NOV-NEXT: and a1, a4, a1 +; CHECK-NOV-NEXT: and a2, a3, a2 +; CHECK-NOV-NEXT: sw a0, 0(s0) +; CHECK-NOV-NEXT: sw a5, 4(s0) +; CHECK-NOV-NEXT: sw a1, 8(s0) +; CHECK-NOV-NEXT: sw a2, 12(s0) ; CHECK-NOV-NEXT: ld ra, 
56(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s1, 40(sp) # 8-byte Folded Reload @@ -788,16 +788,16 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) { ; CHECK-NOV-NEXT: ret ; CHECK-NOV-NEXT: .LBB8_6: # %entry ; CHECK-NOV-NEXT: .cfi_restore_state -; CHECK-NOV-NEXT: mv a0, a2 +; CHECK-NOV-NEXT: mv a0, a3 ; CHECK-NOV-NEXT: fcvt.l.s a1, fs1, rtz -; CHECK-NOV-NEXT: blt s1, a2, .LBB8_2 +; CHECK-NOV-NEXT: blt s1, a3, .LBB8_2 ; CHECK-NOV-NEXT: .LBB8_7: # %entry -; CHECK-NOV-NEXT: mv s1, a2 -; CHECK-NOV-NEXT: fcvt.l.s a3, fs0, rtz -; CHECK-NOV-NEXT: blt a1, a2, .LBB8_3 +; CHECK-NOV-NEXT: mv s1, a3 +; CHECK-NOV-NEXT: fcvt.l.s a2, fs0, rtz +; CHECK-NOV-NEXT: blt a1, a3, .LBB8_3 ; CHECK-NOV-NEXT: .LBB8_8: # %entry -; CHECK-NOV-NEXT: mv a1, a2 -; CHECK-NOV-NEXT: bge a3, a2, .LBB8_4 +; CHECK-NOV-NEXT: mv a1, a3 +; CHECK-NOV-NEXT: bge a2, a3, .LBB8_4 ; CHECK-NOV-NEXT: j .LBB8_5 ; ; CHECK-V-LABEL: ustest_f16i32: @@ -977,17 +977,17 @@ entry: define <2 x i16> @ustest_f64i16(<2 x double> %x) { ; CHECK-NOV-LABEL: ustest_f64i16: ; CHECK-NOV: # %bb.0: # %entry -; CHECK-NOV-NEXT: fcvt.w.d a1, fa1, rtz +; CHECK-NOV-NEXT: fcvt.w.d a0, fa0, rtz ; CHECK-NOV-NEXT: lui a2, 16 ; CHECK-NOV-NEXT: addi a2, a2, -1 -; CHECK-NOV-NEXT: fcvt.w.d a0, fa0, rtz -; CHECK-NOV-NEXT: blt a1, a2, .LBB11_2 +; CHECK-NOV-NEXT: fcvt.w.d a1, fa1, rtz +; CHECK-NOV-NEXT: blt a0, a2, .LBB11_2 ; CHECK-NOV-NEXT: # %bb.1: # %entry -; CHECK-NOV-NEXT: mv a1, a2 +; CHECK-NOV-NEXT: mv a0, a2 ; CHECK-NOV-NEXT: .LBB11_2: # %entry -; CHECK-NOV-NEXT: blt a0, a2, .LBB11_4 +; CHECK-NOV-NEXT: blt a1, a2, .LBB11_4 ; CHECK-NOV-NEXT: # %bb.3: # %entry -; CHECK-NOV-NEXT: mv a0, a2 +; CHECK-NOV-NEXT: mv a1, a2 ; CHECK-NOV-NEXT: .LBB11_4: # %entry ; CHECK-NOV-NEXT: sgtz a2, a1 ; CHECK-NOV-NEXT: sgtz a3, a0 @@ -1146,50 +1146,50 @@ entry: define <4 x i16> @ustest_f32i16(<4 x float> %x) { ; CHECK-NOV-LABEL: ustest_f32i16: ; CHECK-NOV: # %bb.0: # %entry -; CHECK-NOV-NEXT: fcvt.w.s a1, fa3, rtz -; CHECK-NOV-NEXT: lui a4, 16 -; CHECK-NOV-NEXT: addi a4, a4, -1 -; CHECK-NOV-NEXT: fcvt.w.s a2, fa2, rtz -; CHECK-NOV-NEXT: bge a1, a4, .LBB14_6 +; CHECK-NOV-NEXT: fcvt.w.s a1, fa0, rtz +; CHECK-NOV-NEXT: lui a5, 16 +; CHECK-NOV-NEXT: addi a5, a5, -1 +; CHECK-NOV-NEXT: fcvt.w.s a2, fa1, rtz +; CHECK-NOV-NEXT: bge a1, a5, .LBB14_6 ; CHECK-NOV-NEXT: # %bb.1: # %entry -; CHECK-NOV-NEXT: fcvt.w.s a3, fa1, rtz -; CHECK-NOV-NEXT: bge a2, a4, .LBB14_7 +; CHECK-NOV-NEXT: fcvt.w.s a3, fa2, rtz +; CHECK-NOV-NEXT: bge a2, a5, .LBB14_7 ; CHECK-NOV-NEXT: .LBB14_2: # %entry -; CHECK-NOV-NEXT: fcvt.w.s a5, fa0, rtz -; CHECK-NOV-NEXT: bge a3, a4, .LBB14_8 +; CHECK-NOV-NEXT: fcvt.w.s a4, fa3, rtz +; CHECK-NOV-NEXT: bge a3, a5, .LBB14_8 ; CHECK-NOV-NEXT: .LBB14_3: # %entry -; CHECK-NOV-NEXT: blt a5, a4, .LBB14_5 +; CHECK-NOV-NEXT: blt a4, a5, .LBB14_5 ; CHECK-NOV-NEXT: .LBB14_4: # %entry -; CHECK-NOV-NEXT: mv a5, a4 +; CHECK-NOV-NEXT: mv a4, a5 ; CHECK-NOV-NEXT: .LBB14_5: # %entry -; CHECK-NOV-NEXT: sgtz a4, a1 -; CHECK-NOV-NEXT: sgtz a6, a2 -; CHECK-NOV-NEXT: sgtz a7, a3 -; CHECK-NOV-NEXT: sgtz t0, a5 +; CHECK-NOV-NEXT: sgtz a5, a4 +; CHECK-NOV-NEXT: sgtz a6, a3 +; CHECK-NOV-NEXT: sgtz a7, a2 +; CHECK-NOV-NEXT: sgtz t0, a1 ; CHECK-NOV-NEXT: neg t0, t0 ; CHECK-NOV-NEXT: neg a7, a7 ; CHECK-NOV-NEXT: neg a6, a6 -; CHECK-NOV-NEXT: neg a4, a4 -; CHECK-NOV-NEXT: and a5, t0, a5 -; CHECK-NOV-NEXT: and a3, a7, a3 -; CHECK-NOV-NEXT: and a2, a6, a2 -; CHECK-NOV-NEXT: and a1, a4, a1 -; CHECK-NOV-NEXT: sh a5, 0(a0) -; CHECK-NOV-NEXT: sh a3, 2(a0) 
-; CHECK-NOV-NEXT: sh a2, 4(a0) -; CHECK-NOV-NEXT: sh a1, 6(a0) +; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: and a1, t0, a1 +; CHECK-NOV-NEXT: and a2, a7, a2 +; CHECK-NOV-NEXT: and a3, a6, a3 +; CHECK-NOV-NEXT: and a4, a5, a4 +; CHECK-NOV-NEXT: sh a1, 0(a0) +; CHECK-NOV-NEXT: sh a2, 2(a0) +; CHECK-NOV-NEXT: sh a3, 4(a0) +; CHECK-NOV-NEXT: sh a4, 6(a0) ; CHECK-NOV-NEXT: ret ; CHECK-NOV-NEXT: .LBB14_6: # %entry -; CHECK-NOV-NEXT: mv a1, a4 -; CHECK-NOV-NEXT: fcvt.w.s a3, fa1, rtz -; CHECK-NOV-NEXT: blt a2, a4, .LBB14_2 +; CHECK-NOV-NEXT: mv a1, a5 +; CHECK-NOV-NEXT: fcvt.w.s a3, fa2, rtz +; CHECK-NOV-NEXT: blt a2, a5, .LBB14_2 ; CHECK-NOV-NEXT: .LBB14_7: # %entry -; CHECK-NOV-NEXT: mv a2, a4 -; CHECK-NOV-NEXT: fcvt.w.s a5, fa0, rtz -; CHECK-NOV-NEXT: blt a3, a4, .LBB14_3 +; CHECK-NOV-NEXT: mv a2, a5 +; CHECK-NOV-NEXT: fcvt.w.s a4, fa3, rtz +; CHECK-NOV-NEXT: blt a3, a5, .LBB14_3 ; CHECK-NOV-NEXT: .LBB14_8: # %entry -; CHECK-NOV-NEXT: mv a3, a4 -; CHECK-NOV-NEXT: bge a5, a4, .LBB14_4 +; CHECK-NOV-NEXT: mv a3, a5 +; CHECK-NOV-NEXT: bge a4, a5, .LBB14_4 ; CHECK-NOV-NEXT: j .LBB14_5 ; ; CHECK-V-LABEL: ustest_f32i16: @@ -1974,72 +1974,72 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) { ; CHECK-NOV-NEXT: .cfi_remember_state ; CHECK-NOV-NEXT: lhu s1, 32(a1) ; CHECK-NOV-NEXT: lhu s2, 40(a1) -; CHECK-NOV-NEXT: lhu a2, 48(a1) -; CHECK-NOV-NEXT: lhu s3, 56(a1) -; CHECK-NOV-NEXT: lhu s4, 0(a1) -; CHECK-NOV-NEXT: lhu s5, 8(a1) +; CHECK-NOV-NEXT: lhu s3, 48(a1) +; CHECK-NOV-NEXT: lhu s4, 56(a1) +; CHECK-NOV-NEXT: lhu s5, 0(a1) +; CHECK-NOV-NEXT: lhu a2, 8(a1) ; CHECK-NOV-NEXT: lhu s6, 16(a1) ; CHECK-NOV-NEXT: lhu s7, 24(a1) ; CHECK-NOV-NEXT: mv s0, a0 ; CHECK-NOV-NEXT: fmv.w.x fa0, a2 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs6, fa0 -; CHECK-NOV-NEXT: fmv.w.x fa0, s2 +; CHECK-NOV-NEXT: fmv.w.x fa0, s6 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs5, fa0 -; CHECK-NOV-NEXT: fmv.w.x fa0, s1 +; CHECK-NOV-NEXT: fmv.w.x fa0, s7 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs4, fa0 -; CHECK-NOV-NEXT: fmv.w.x fa0, s7 +; CHECK-NOV-NEXT: fmv.w.x fa0, s1 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs3, fa0 -; CHECK-NOV-NEXT: fmv.w.x fa0, s6 +; CHECK-NOV-NEXT: fmv.w.x fa0, s2 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs2, fa0 -; CHECK-NOV-NEXT: fmv.w.x fa0, s5 +; CHECK-NOV-NEXT: fmv.w.x fa0, s3 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs1, fa0 ; CHECK-NOV-NEXT: fmv.w.x fa0, s4 ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fmv.s fs0, fa0 -; CHECK-NOV-NEXT: fmv.w.x fa0, s3 +; CHECK-NOV-NEXT: fmv.w.x fa0, s5 ; CHECK-NOV-NEXT: fcvt.l.s s1, fs6, rtz ; CHECK-NOV-NEXT: call __extendhfsf2 ; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz -; CHECK-NOV-NEXT: lui a4, 16 -; CHECK-NOV-NEXT: addi a4, a4, -1 -; CHECK-NOV-NEXT: bge a0, a4, .LBB17_10 +; CHECK-NOV-NEXT: lui a5, 16 +; CHECK-NOV-NEXT: addi a5, a5, -1 +; CHECK-NOV-NEXT: bge a0, a5, .LBB17_10 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a1, fs5, rtz -; CHECK-NOV-NEXT: bge s1, a4, .LBB17_11 +; CHECK-NOV-NEXT: bge s1, a5, .LBB17_11 ; CHECK-NOV-NEXT: .LBB17_2: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a2, fs4, rtz -; CHECK-NOV-NEXT: bge a1, a4, .LBB17_12 +; CHECK-NOV-NEXT: bge a1, a5, .LBB17_12 ; CHECK-NOV-NEXT: .LBB17_3: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a3, fs3, rtz -; CHECK-NOV-NEXT: bge a2, a4, .LBB17_13 +; CHECK-NOV-NEXT: bge a2, a5, .LBB17_13 ; CHECK-NOV-NEXT: .LBB17_4: # %entry -; CHECK-NOV-NEXT: fcvt.l.s a5, fs2, rtz -; CHECK-NOV-NEXT: bge 
a3, a4, .LBB17_14 +; CHECK-NOV-NEXT: fcvt.l.s a4, fs2, rtz +; CHECK-NOV-NEXT: bge a3, a5, .LBB17_14 ; CHECK-NOV-NEXT: .LBB17_5: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a6, fs1, rtz -; CHECK-NOV-NEXT: bge a5, a4, .LBB17_15 +; CHECK-NOV-NEXT: bge a4, a5, .LBB17_15 ; CHECK-NOV-NEXT: .LBB17_6: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a7, fs0, rtz -; CHECK-NOV-NEXT: bge a6, a4, .LBB17_16 +; CHECK-NOV-NEXT: bge a6, a5, .LBB17_16 ; CHECK-NOV-NEXT: .LBB17_7: # %entry -; CHECK-NOV-NEXT: blt a7, a4, .LBB17_9 +; CHECK-NOV-NEXT: blt a7, a5, .LBB17_9 ; CHECK-NOV-NEXT: .LBB17_8: # %entry -; CHECK-NOV-NEXT: mv a7, a4 +; CHECK-NOV-NEXT: mv a7, a5 ; CHECK-NOV-NEXT: .LBB17_9: # %entry -; CHECK-NOV-NEXT: sgtz a4, a0 -; CHECK-NOV-NEXT: sgtz t0, s1 -; CHECK-NOV-NEXT: sgtz t1, a1 -; CHECK-NOV-NEXT: sgtz t2, a2 -; CHECK-NOV-NEXT: sgtz t3, a3 -; CHECK-NOV-NEXT: sgtz t4, a5 -; CHECK-NOV-NEXT: sgtz t5, a6 -; CHECK-NOV-NEXT: sgtz t6, a7 +; CHECK-NOV-NEXT: sgtz a5, a7 +; CHECK-NOV-NEXT: sgtz t0, a6 +; CHECK-NOV-NEXT: sgtz t1, a4 +; CHECK-NOV-NEXT: sgtz t2, a3 +; CHECK-NOV-NEXT: sgtz t3, a2 +; CHECK-NOV-NEXT: sgtz t4, a1 +; CHECK-NOV-NEXT: sgtz t5, s1 +; CHECK-NOV-NEXT: sgtz t6, a0 ; CHECK-NOV-NEXT: neg t6, t6 ; CHECK-NOV-NEXT: neg t5, t5 ; CHECK-NOV-NEXT: neg t4, t4 @@ -2047,23 +2047,23 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) { ; CHECK-NOV-NEXT: neg t2, t2 ; CHECK-NOV-NEXT: neg t1, t1 ; CHECK-NOV-NEXT: neg t0, t0 -; CHECK-NOV-NEXT: neg a4, a4 -; CHECK-NOV-NEXT: and a7, t6, a7 -; CHECK-NOV-NEXT: and a6, t5, a6 -; CHECK-NOV-NEXT: and a5, t4, a5 -; CHECK-NOV-NEXT: and a3, t3, a3 -; CHECK-NOV-NEXT: and a2, t2, a2 -; CHECK-NOV-NEXT: and a1, t1, a1 -; CHECK-NOV-NEXT: and t0, t0, s1 -; CHECK-NOV-NEXT: and a0, a4, a0 -; CHECK-NOV-NEXT: sh a2, 8(s0) -; CHECK-NOV-NEXT: sh a1, 10(s0) -; CHECK-NOV-NEXT: sh t0, 12(s0) -; CHECK-NOV-NEXT: sh a0, 14(s0) -; CHECK-NOV-NEXT: sh a7, 0(s0) -; CHECK-NOV-NEXT: sh a6, 2(s0) -; CHECK-NOV-NEXT: sh a5, 4(s0) -; CHECK-NOV-NEXT: sh a3, 6(s0) +; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: and a0, t6, a0 +; CHECK-NOV-NEXT: and t5, t5, s1 +; CHECK-NOV-NEXT: and a1, t4, a1 +; CHECK-NOV-NEXT: and a2, t3, a2 +; CHECK-NOV-NEXT: and a3, t2, a3 +; CHECK-NOV-NEXT: and a4, t1, a4 +; CHECK-NOV-NEXT: and a6, t0, a6 +; CHECK-NOV-NEXT: and a5, a5, a7 +; CHECK-NOV-NEXT: sh a3, 8(s0) +; CHECK-NOV-NEXT: sh a4, 10(s0) +; CHECK-NOV-NEXT: sh a6, 12(s0) +; CHECK-NOV-NEXT: sh a5, 14(s0) +; CHECK-NOV-NEXT: sh a0, 0(s0) +; CHECK-NOV-NEXT: sh t5, 2(s0) +; CHECK-NOV-NEXT: sh a1, 4(s0) +; CHECK-NOV-NEXT: sh a2, 6(s0) ; CHECK-NOV-NEXT: ld ra, 120(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s0, 112(sp) # 8-byte Folded Reload ; CHECK-NOV-NEXT: ld s1, 104(sp) # 8-byte Folded Reload @@ -2101,32 +2101,32 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) { ; CHECK-NOV-NEXT: ret ; CHECK-NOV-NEXT: .LBB17_10: # %entry ; CHECK-NOV-NEXT: .cfi_restore_state -; CHECK-NOV-NEXT: mv a0, a4 +; CHECK-NOV-NEXT: mv a0, a5 ; CHECK-NOV-NEXT: fcvt.l.s a1, fs5, rtz -; CHECK-NOV-NEXT: blt s1, a4, .LBB17_2 +; CHECK-NOV-NEXT: blt s1, a5, .LBB17_2 ; CHECK-NOV-NEXT: .LBB17_11: # %entry -; CHECK-NOV-NEXT: mv s1, a4 +; CHECK-NOV-NEXT: mv s1, a5 ; CHECK-NOV-NEXT: fcvt.l.s a2, fs4, rtz -; CHECK-NOV-NEXT: blt a1, a4, .LBB17_3 +; CHECK-NOV-NEXT: blt a1, a5, .LBB17_3 ; CHECK-NOV-NEXT: .LBB17_12: # %entry -; CHECK-NOV-NEXT: mv a1, a4 +; CHECK-NOV-NEXT: mv a1, a5 ; CHECK-NOV-NEXT: fcvt.l.s a3, fs3, rtz -; CHECK-NOV-NEXT: blt a2, a4, .LBB17_4 +; CHECK-NOV-NEXT: blt a2, a5, .LBB17_4 ; CHECK-NOV-NEXT: .LBB17_13: # %entry -; CHECK-NOV-NEXT: mv a2, a4 -; 
CHECK-NOV-NEXT: fcvt.l.s a5, fs2, rtz -; CHECK-NOV-NEXT: blt a3, a4, .LBB17_5 +; CHECK-NOV-NEXT: mv a2, a5 +; CHECK-NOV-NEXT: fcvt.l.s a4, fs2, rtz +; CHECK-NOV-NEXT: blt a3, a5, .LBB17_5 ; CHECK-NOV-NEXT: .LBB17_14: # %entry -; CHECK-NOV-NEXT: mv a3, a4 +; CHECK-NOV-NEXT: mv a3, a5 ; CHECK-NOV-NEXT: fcvt.l.s a6, fs1, rtz -; CHECK-NOV-NEXT: blt a5, a4, .LBB17_6 +; CHECK-NOV-NEXT: blt a4, a5, .LBB17_6 ; CHECK-NOV-NEXT: .LBB17_15: # %entry -; CHECK-NOV-NEXT: mv a5, a4 +; CHECK-NOV-NEXT: mv a4, a5 ; CHECK-NOV-NEXT: fcvt.l.s a7, fs0, rtz -; CHECK-NOV-NEXT: blt a6, a4, .LBB17_7 +; CHECK-NOV-NEXT: blt a6, a5, .LBB17_7 ; CHECK-NOV-NEXT: .LBB17_16: # %entry -; CHECK-NOV-NEXT: mv a6, a4 -; CHECK-NOV-NEXT: bge a7, a4, .LBB17_8 +; CHECK-NOV-NEXT: mv a6, a5 +; CHECK-NOV-NEXT: bge a7, a5, .LBB17_8 ; CHECK-NOV-NEXT: j .LBB17_9 ; ; CHECK-V-LABEL: ustest_f16i16: diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll index 346e40a..02825b2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -5427,18 +5427,18 @@ for.cond.cleanup: ; preds = %vector.body define void @sink_splat_select_op1(ptr nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_select_op1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lui a2, 1 -; CHECK-NEXT: add a2, a0, a2 -; CHECK-NEXT: li a3, 42 +; CHECK-NEXT: lui a3, 1 +; CHECK-NEXT: li a2, 42 +; CHECK-NEXT: add a3, a0, a3 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: .LBB117_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vmseq.vx v0, v8, a3 -; CHECK-NEXT: vmerge.vxm v8, v8, a1, v0 -; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: vle32.v v9, (a0) +; CHECK-NEXT: vmseq.vx v0, v9, a2 +; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bne a0, a2, .LBB117_1 +; CHECK-NEXT: bne a0, a3, .LBB117_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -5472,9 +5472,8 @@ define void @sink_splat_select_op2(ptr nocapture %a, i32 signext %x) { ; CHECK-NEXT: .LBB118_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vmseq.vx v0, v9, a2 -; CHECK-NEXT: vmerge.vvm v9, v8, v9, v0 -; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: vmsne.vx v0, v9, a2 +; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bne a0, a1, .LBB118_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll index 45f158f..09f42ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll @@ -443,8 +443,8 @@ define <vscale x 1 x i64> @straightline_offset_add(ptr %p, i64 %offset) { ret <vscale x 1 x i64> %x } -define <vscale x 1 x i64> @straightline_offset_disjoint_or(ptr %p, i64 %offset) { -; CHECK-LABEL: @straightline_offset_disjoint_or( +define <vscale x 1 x i64> @straightline_offset_disjoint_or_1(ptr %p) { +; CHECK-LABEL: @straightline_offset_disjoint_or_1( ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 1 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i64(ptr [[TMP1]], i64 8, <vscale x 1 x i1> splat (i1 true), i32 [[TMP2]]) @@ -464,6 +464,33 @@ define <vscale x 1 x i64> 
@straightline_offset_disjoint_or(ptr %p, i64 %offset) ret <vscale x 1 x i64> %x } +define <vscale x 1 x i64> @straightline_offset_disjoint_or(ptr %p, i1 %offset) { +; CHECK-LABEL: @straightline_offset_disjoint_or( +; CHECK-NEXT: [[AND:%.*]] = zext i1 [[OFFSET:%.*]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 4, [[AND]] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i64(ptr [[TMP1]], i64 8, <vscale x 1 x i1> splat (i1 true), i32 [[TMP2]]) +; CHECK-NEXT: [[X:%.*]] = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> splat (i1 true), <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> poison, i32 [[TMP2]]) +; CHECK-NEXT: ret <vscale x 1 x i64> [[X]] +; + %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64() + %step.shl = shl <vscale x 1 x i64> %step, splat (i64 1) + %add = add <vscale x 1 x i64> %step.shl, splat (i64 4) + %zext = zext i1 %offset to i64 + %splat.insert = insertelement <vscale x 1 x i64> poison, i64 %zext, i64 0 + %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer + %offsetv = or disjoint <vscale x 1 x i64> %add, %splat + %ptrs = getelementptr i32, ptr %p, <vscale x 1 x i64> %offsetv + %x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0( + <vscale x 1 x ptr> %ptrs, + i32 8, + <vscale x 1 x i1> splat (i1 true), + <vscale x 1 x i64> poison + ) + ret <vscale x 1 x i64> %x +} + define <vscale x 1 x i64> @straightline_offset_shl(ptr %p) { ; CHECK-LABEL: @straightline_offset_shl( ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32() diff --git a/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll index d0b184b..afe918b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll @@ -13,22 +13,22 @@ define void @vec3_setcc_crash(ptr %in, ptr %out) { ; RV32: # %bb.0: ; RV32-NEXT: lw a0, 0(a0) ; RV32-NEXT: srli a2, a0, 16 -; RV32-NEXT: slli a3, a0, 16 -; RV32-NEXT: slli a4, a0, 24 -; RV32-NEXT: slli a5, a0, 8 -; RV32-NEXT: srli a6, a3, 24 -; RV32-NEXT: srai a3, a3, 24 +; RV32-NEXT: srli a3, a0, 8 +; RV32-NEXT: slli a4, a0, 16 +; RV32-NEXT: slli a5, a0, 24 +; RV32-NEXT: slli a6, a0, 8 ; RV32-NEXT: srai a4, a4, 24 ; RV32-NEXT: srai a5, a5, 24 +; RV32-NEXT: srai a6, a6, 24 +; RV32-NEXT: sgtz a6, a6 ; RV32-NEXT: sgtz a5, a5 ; RV32-NEXT: sgtz a4, a4 -; RV32-NEXT: sgtz a3, a3 -; RV32-NEXT: neg a3, a3 ; RV32-NEXT: neg a4, a4 ; RV32-NEXT: neg a5, a5 -; RV32-NEXT: and a3, a3, a6 -; RV32-NEXT: and a0, a4, a0 -; RV32-NEXT: and a2, a5, a2 +; RV32-NEXT: neg a6, a6 +; RV32-NEXT: and a3, a4, a3 +; RV32-NEXT: and a0, a5, a0 +; RV32-NEXT: and a2, a6, a2 ; RV32-NEXT: slli a3, a3, 8 ; RV32-NEXT: zext.b a0, a0 ; RV32-NEXT: or a0, a0, a3 @@ -39,23 +39,23 @@ define void @vec3_setcc_crash(ptr %in, ptr %out) { ; RV64-LABEL: vec3_setcc_crash: ; RV64: # %bb.0: ; RV64-NEXT: lw a0, 0(a0) -; RV64-NEXT: srliw a2, a0, 16 -; RV64-NEXT: slli a3, a0, 48 -; RV64-NEXT: slli a4, a0, 56 -; RV64-NEXT: slli a5, a0, 40 -; RV64-NEXT: srli a6, a3, 56 -; RV64-NEXT: srai a3, a3, 56 +; RV64-NEXT: srli a2, a0, 16 +; RV64-NEXT: srli a3, a0, 8 +; RV64-NEXT: slli a4, a0, 48 +; RV64-NEXT: slli a5, a0, 56 +; RV64-NEXT: slli a6, a0, 40 ; RV64-NEXT: srai a4, a4, 56 ; RV64-NEXT: srai a5, a5, 56 +; RV64-NEXT: srai a6, a6, 56 +; RV64-NEXT: sgtz a6, a6 ; 
RV64-NEXT: sgtz a5, a5 ; RV64-NEXT: sgtz a4, a4 -; RV64-NEXT: sgtz a3, a3 -; RV64-NEXT: neg a3, a3 ; RV64-NEXT: neg a4, a4 ; RV64-NEXT: neg a5, a5 -; RV64-NEXT: and a3, a3, a6 -; RV64-NEXT: and a0, a4, a0 -; RV64-NEXT: and a2, a5, a2 +; RV64-NEXT: neg a6, a6 +; RV64-NEXT: and a3, a4, a3 +; RV64-NEXT: and a0, a5, a0 +; RV64-NEXT: and a2, a6, a2 ; RV64-NEXT: slli a3, a3, 8 ; RV64-NEXT: zext.b a0, a0 ; RV64-NEXT: or a0, a0, a3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll index 5c1e41f..b83ddce 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll @@ -470,61 +470,61 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vs ; CHECK-LABEL: test_vp_splice_nxv16i64: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a4, vlenb -; CHECK-NEXT: slli a5, a4, 1 -; CHECK-NEXT: addi a5, a5, -1 ; CHECK-NEXT: slli a1, a4, 3 -; CHECK-NEXT: mv a7, a2 -; CHECK-NEXT: bltu a2, a5, .LBB22_2 +; CHECK-NEXT: slli a7, a4, 1 +; CHECK-NEXT: addi a7, a7, -1 +; CHECK-NEXT: add a5, a0, a1 +; CHECK-NEXT: mv a6, a2 +; CHECK-NEXT: bltu a2, a7, .LBB22_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a7, a5 +; CHECK-NEXT: mv a6, a7 ; CHECK-NEXT: .LBB22_2: ; CHECK-NEXT: addi sp, sp, -80 ; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 80 -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: slli a5, a5, 5 -; CHECK-NEXT: sub sp, sp, a5 +; CHECK-NEXT: csrr a7, vlenb +; CHECK-NEXT: slli a7, a7, 5 +; CHECK-NEXT: sub sp, sp, a7 ; CHECK-NEXT: andi sp, sp, -64 -; CHECK-NEXT: add a5, a0, a1 -; CHECK-NEXT: slli a7, a7, 3 +; CHECK-NEXT: vl8re64.v v24, (a5) +; CHECK-NEXT: slli a5, a6, 3 ; CHECK-NEXT: addi a6, sp, 64 -; CHECK-NEXT: mv t0, a2 +; CHECK-NEXT: add a5, a6, a5 +; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: bltu a2, a4, .LBB22_4 ; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv t0, a4 +; CHECK-NEXT: mv a7, a4 ; CHECK-NEXT: .LBB22_4: -; CHECK-NEXT: vl8re64.v v24, (a5) -; CHECK-NEXT: add a5, a6, a7 ; CHECK-NEXT: vl8re64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, t0, e64, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a6) ; CHECK-NEXT: sub a0, a2, a4 +; CHECK-NEXT: add a6, a6, a1 +; CHECK-NEXT: sub a7, a3, a4 ; CHECK-NEXT: sltu a2, a2, a0 ; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a0, a2, a0 -; CHECK-NEXT: add a6, a6, a1 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: and a2, a2, a0 +; CHECK-NEXT: sltu a0, a3, a7 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a7 +; CHECK-NEXT: add a7, a5, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v16, (a6) -; CHECK-NEXT: mv a0, a3 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vse64.v v24, (a7) ; CHECK-NEXT: bltu a3, a4, .LBB22_6 ; CHECK-NEXT: # %bb.5: -; CHECK-NEXT: mv a0, a4 +; CHECK-NEXT: mv a3, a4 ; CHECK-NEXT: .LBB22_6: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v0, (a5) -; CHECK-NEXT: sub a2, a3, a4 -; CHECK-NEXT: add a5, a5, a1 -; CHECK-NEXT: sltu a3, a3, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: addi a3, sp, 104 -; CHECK-NEXT: add a1, a3, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vse64.v v24, (a5) -; CHECK-NEXT: vle64.v v16, (a1) +; CHECK-NEXT: addi a2, sp, 104 +; CHECK-NEXT: add a1, a2, a1 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v8, (a3) +; CHECK-NEXT: 
vle64.v v16, (a1) +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a2) ; CHECK-NEXT: addi sp, s0, -80 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload @@ -537,66 +537,66 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vs define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16 x i64> %va, <vscale x 16 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 { ; CHECK-LABEL: test_vp_splice_nxv16i64_negative_offset: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: slli a6, a5, 1 -; CHECK-NEXT: addi a6, a6, -1 -; CHECK-NEXT: slli a1, a5, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: bltu a2, a6, .LBB23_2 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a1, a4, 3 +; CHECK-NEXT: slli a7, a4, 1 +; CHECK-NEXT: addi a7, a7, -1 +; CHECK-NEXT: add a5, a0, a1 +; CHECK-NEXT: mv a6, a2 +; CHECK-NEXT: bltu a2, a7, .LBB23_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a4, a6 +; CHECK-NEXT: mv a6, a7 ; CHECK-NEXT: .LBB23_2: ; CHECK-NEXT: addi sp, sp, -80 ; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; CHECK-NEXT: addi s0, sp, 80 -; CHECK-NEXT: csrr a6, vlenb -; CHECK-NEXT: slli a6, a6, 5 -; CHECK-NEXT: sub sp, sp, a6 +; CHECK-NEXT: csrr a7, vlenb +; CHECK-NEXT: slli a7, a7, 5 +; CHECK-NEXT: sub sp, sp, a7 ; CHECK-NEXT: andi sp, sp, -64 -; CHECK-NEXT: add a6, a0, a1 -; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: vl8re64.v v24, (a5) +; CHECK-NEXT: slli a5, a6, 3 ; CHECK-NEXT: addi a7, sp, 64 +; CHECK-NEXT: add a6, a7, a5 ; CHECK-NEXT: mv t0, a2 -; CHECK-NEXT: bltu a2, a5, .LBB23_4 +; CHECK-NEXT: bltu a2, a4, .LBB23_4 ; CHECK-NEXT: # %bb.3: -; CHECK-NEXT: mv t0, a5 +; CHECK-NEXT: mv t0, a4 ; CHECK-NEXT: .LBB23_4: -; CHECK-NEXT: vl8re64.v v24, (a6) -; CHECK-NEXT: add a6, a7, a4 ; CHECK-NEXT: vl8re64.v v0, (a0) ; CHECK-NEXT: vsetvli zero, t0, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a7) -; CHECK-NEXT: sub a0, a2, a5 +; CHECK-NEXT: sub a0, a2, a4 +; CHECK-NEXT: add a7, a7, a1 +; CHECK-NEXT: sub t0, a3, a4 ; CHECK-NEXT: sltu a2, a2, a0 ; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a0, a2, a0 -; CHECK-NEXT: add a7, a7, a1 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: and a2, a2, a0 +; CHECK-NEXT: sltu a0, a3, t0 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, t0 +; CHECK-NEXT: add t0, a6, a1 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v16, (a7) -; CHECK-NEXT: mv a0, a3 -; CHECK-NEXT: bltu a3, a5, .LBB23_6 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vse64.v v24, (t0) +; CHECK-NEXT: bltu a3, a4, .LBB23_6 ; CHECK-NEXT: # %bb.5: -; CHECK-NEXT: mv a0, a5 +; CHECK-NEXT: mv a3, a4 ; CHECK-NEXT: .LBB23_6: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: li a2, 8 +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v0, (a6) -; CHECK-NEXT: sub a2, a3, a5 -; CHECK-NEXT: add a5, a6, a1 -; CHECK-NEXT: sltu a3, a3, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: li a3, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vse64.v v24, (a5) -; CHECK-NEXT: bltu a4, a3, .LBB23_8 +; CHECK-NEXT: bltu a5, a2, .LBB23_8 ; CHECK-NEXT: # %bb.7: -; CHECK-NEXT: li a4, 8 +; CHECK-NEXT: li a5, 8 ; CHECK-NEXT: .LBB23_8: -; CHECK-NEXT: sub a2, a6, a4 +; CHECK-NEXT: sub a2, a6, a5 ; CHECK-NEXT: add a1, a2, a1 -; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; 
CHECK-NEXT: vle64.v v16, (a1) +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a2) ; CHECK-NEXT: addi sp, s0, -80 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/vploadff.ll b/llvm/test/CodeGen/RISCV/rvv/vploadff.ll new file mode 100644 index 0000000..9e08938 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vploadff.ll @@ -0,0 +1,1008 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define { <vscale x 1 x i8>, i32 } @vploadff_nxv1i8(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i8>, i32 } @llvm.vp.load.ff.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x i8>, i32 } %load +} + +define { <vscale x 1 x i8>, i32 } @vploadff_nxv1i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i8>, i32 } @llvm.vp.load.ff.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x i8>, i32 } %load +} + +define { <vscale x 2 x i8>, i32 } @vploadff_nxv2i8(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i8>, i32 } @llvm.vp.load.ff.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x i8>, i32 } %load +} + +define { <vscale x 2 x i8>, i32 } @vploadff_nxv2i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i8>, i32 } @llvm.vp.load.ff.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x i8>, i32 } %load +} + +define { <vscale x 4 x i8>, i32 } @vploadff_nxv4i8(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i8>, i32 } @llvm.vp.load.ff.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x i8>, i32 } %load +} + +define { <vscale x 4 x i8>, i32 } @vploadff_nxv4i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i8>, i32 } @llvm.vp.load.ff.nxv4i8.p0(ptr %ptr, 
<vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x i8>, i32 } %load +} + +define { <vscale x 8 x i8>, i32 } @vploadff_nxv8i8(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i8>, i32 } @llvm.vp.load.ff.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x i8>, i32 } %load +} + +define { <vscale x 8 x i8>, i32 } @vploadff_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i8>, i32 } @llvm.vp.load.ff.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x i8>, i32 } %load +} + +define { <vscale x 16 x i8>, i32 } @vploadff_nxv16i8(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i8>, i32 } @llvm.vp.load.ff.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x i8>, i32 } %load +} + +define { <vscale x 16 x i8>, i32 } @vploadff_nxv16i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i8>, i32 } @llvm.vp.load.ff.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x i8>, i32 } %load +} + +define { <vscale x 32 x i8>, i32 } @vploadff_nxv32i8(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x i8>, i32 } @llvm.vp.load.ff.nxv32i8.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl) + ret { <vscale x 32 x i8>, i32 } %load +} + +define { <vscale x 32 x i8>, i32 } @vploadff_nxv32i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x i8>, i32 } @llvm.vp.load.ff.nxv32i8.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 32 x i8>, i32 } %load +} + +define { <vscale x 64 x i8>, i32 } @vploadff_nxv64i8(ptr %ptr, <vscale x 64 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 64 x i8>, i32 } @llvm.vp.load.ff.nxv64i8.p0(ptr %ptr, <vscale x 64 x i1> %m, i32 %evl) + ret { <vscale x 64 x i8>, i32 } %load +} + +define { <vscale x 64 x i8>, i32 } @vploadff_nxv64i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv64i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: 
ret + %load = call { <vscale x 64 x i8>, i32 } @llvm.vp.load.ff.nxv64i8.p0(ptr %ptr, <vscale x 64 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 64 x i8>, i32 } %load +} + +define <vscale x 128 x i8> @vploadff_nxv128i8(ptr %ptr, ptr %evl_out, <vscale x 128 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv128i8: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a3, vlenb +; CHECK-NEXT: slli a3, a3, 3 +; CHECK-NEXT: bltu a2, a3, .LBB14_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a2, a3 +; CHECK-NEXT: .LBB14_2: +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a1) +; CHECK-NEXT: ret + %load = call { <vscale x 128 x i8>, i32 } @llvm.vp.load.ff.nxv128i8.p0(ptr %ptr, <vscale x 128 x i1> %m, i32 %evl) + %result0 = extractvalue { <vscale x 128 x i8>, i32 } %load, 0 + %result1 = extractvalue { <vscale x 128 x i8>, i32 } %load, 1 + store i32 %result1, ptr %evl_out + ret <vscale x 128 x i8> %result0 +} + +define <vscale x 128 x i8> @vploadff_nxv128i8_allones_mask(ptr %ptr, ptr %evl_out, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv128i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a3, vlenb +; CHECK-NEXT: slli a3, a3, 3 +; CHECK-NEXT: bltu a2, a3, .LBB15_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a2, a3 +; CHECK-NEXT: .LBB15_2: +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a1) +; CHECK-NEXT: ret + %load = call { <vscale x 128 x i8>, i32 } @llvm.vp.load.ff.nxv128i8.p0(ptr %ptr, <vscale x 128 x i1> splat (i1 true), i32 %evl) + %result0 = extractvalue { <vscale x 128 x i8>, i32 } %load, 0 + %result1 = extractvalue { <vscale x 128 x i8>, i32 } %load, 1 + store i32 %result1, ptr %evl_out + ret <vscale x 128 x i8> %result0 +} + +define { <vscale x 1 x i16>, i32 } @vploadff_nxv1i16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i16>, i32 } @llvm.vp.load.ff.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x i16>, i32 } %load +} + +define { <vscale x 1 x i16>, i32 } @vploadff_nxv1i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i16>, i32 } @llvm.vp.load.ff.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x i16>, i32 } %load +} + +define { <vscale x 2 x i16>, i32 } @vploadff_nxv2i16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i16>, i32 } @llvm.vp.load.ff.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x i16>, i32 } %load +} + +define { <vscale x 2 x i16>, i32 } @vploadff_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i16>, i32 } 
@llvm.vp.load.ff.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x i16>, i32 } %load +} + +define { <vscale x 4 x i16>, i32 } @vploadff_nxv4i16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i16>, i32 } @llvm.vp.load.ff.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x i16>, i32 } %load +} + +define { <vscale x 4 x i16>, i32 } @vploadff_nxv4i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i16>, i32 } @llvm.vp.load.ff.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x i16>, i32 } %load +} + +define { <vscale x 8 x i16>, i32 } @vploadff_nxv8i16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i16>, i32 } @llvm.vp.load.ff.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x i16>, i32 } %load +} + +define { <vscale x 8 x i16>, i32 } @vploadff_nxv8i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i16>, i32 } @llvm.vp.load.ff.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x i16>, i32 } %load +} + +define { <vscale x 16 x i16>, i32 } @vploadff_nxv16i16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i16>, i32 } @llvm.vp.load.ff.nxv16i16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x i16>, i32 } %load +} + +define { <vscale x 16 x i16>, i32 } @vploadff_nxv16i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i16>, i32 } @llvm.vp.load.ff.nxv16i16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x i16>, i32 } %load +} + +define { <vscale x 32 x i16>, i32 } @vploadff_nxv32i16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x i16>, i32 } @llvm.vp.load.ff.nxv32i16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl) + ret { <vscale x 32 x i16>, i32 } %load +} + +define { <vscale x 32 x i16>, i32 } @vploadff_nxv32i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, 
m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x i16>, i32 } @llvm.vp.load.ff.nxv32i16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 32 x i16>, i32 } %load +} + +define { <vscale x 1 x i32>, i32 } @vploadff_nxv1i32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i32>, i32 } @llvm.vp.load.ff.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x i32>, i32 } %load +} + +define { <vscale x 1 x i32>, i32 } @vploadff_nxv1i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i32>, i32 } @llvm.vp.load.ff.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x i32>, i32 } %load +} + +define { <vscale x 2 x i32>, i32 } @vploadff_nxv2i32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i32>, i32 } @llvm.vp.load.ff.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x i32>, i32 } %load +} + +define { <vscale x 2 x i32>, i32 } @vploadff_nxv2i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i32>, i32 } @llvm.vp.load.ff.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x i32>, i32 } %load +} + +define { <vscale x 4 x i32>, i32 } @vploadff_nxv4i32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i32>, i32 } @llvm.vp.load.ff.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x i32>, i32 } %load +} + +define { <vscale x 4 x i32>, i32 } @vploadff_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i32>, i32 } @llvm.vp.load.ff.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x i32>, i32 } %load +} + +define { <vscale x 8 x i32>, i32 } @vploadff_nxv8i32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i32>, i32 } @llvm.vp.load.ff.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x i32>, i32 } %load +} + +define { <vscale x 8 x i32>, i32 } @vploadff_nxv8i32_allones_mask(ptr %ptr, i32 zeroext 
%evl) { +; CHECK-LABEL: vploadff_nxv8i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i32>, i32 } @llvm.vp.load.ff.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x i32>, i32 } %load +} + +define { <vscale x 16 x i32>, i32 } @vploadff_nxv16i32(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i32>, i32 } @llvm.vp.load.ff.nxv16i32.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x i32>, i32 } %load +} + +define { <vscale x 16 x i32>, i32 } @vploadff_nxv16i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i32>, i32 } @llvm.vp.load.ff.nxv16i32.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x i32>, i32 } %load +} + +define { <vscale x 1 x i64>, i32 } @vploadff_nxv1i64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i64>, i32 } @llvm.vp.load.ff.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x i64>, i32 } %load +} + +define { <vscale x 1 x i64>, i32 } @vploadff_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i64>, i32 } @llvm.vp.load.ff.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x i64>, i32 } %load +} + +define { <vscale x 2 x i64>, i32 } @vploadff_nxv2i64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i64>, i32 } @llvm.vp.load.ff.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x i64>, i32 } %load +} + +define { <vscale x 2 x i64>, i32 } @vploadff_nxv2i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i64>, i32 } @llvm.vp.load.ff.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x i64>, i32 } %load +} + +define { <vscale x 4 x i64>, i32 } @vploadff_nxv4i64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i64>, i32 } @llvm.vp.load.ff.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { 
<vscale x 4 x i64>, i32 } %load +} + +define { <vscale x 4 x i64>, i32 } @vploadff_nxv4i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i64>, i32 } @llvm.vp.load.ff.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x i64>, i32 } %load +} + +define { <vscale x 8 x i64>, i32 } @vploadff_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i64>, i32 } @llvm.vp.load.ff.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x i64>, i32 } %load +} + +define { <vscale x 8 x i64>, i32 } @vploadff_nxv8i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i64>, i32 } @llvm.vp.load.ff.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x i64>, i32 } %load +} + +define { <vscale x 1 x half>, i32 } @vploadff_nxv1f16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x half>, i32 } @llvm.vp.load.ff.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x half>, i32 } %load +} + +define { <vscale x 1 x half>, i32 } @vploadff_nxv1f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x half>, i32 } @llvm.vp.load.ff.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x half>, i32 } %load +} + +define { <vscale x 2 x half>, i32 } @vploadff_nxv2f16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x half>, i32 } @llvm.vp.load.ff.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x half>, i32 } %load +} + +define { <vscale x 2 x half>, i32 } @vploadff_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x half>, i32 } @llvm.vp.load.ff.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x half>, i32 } %load +} + +define { <vscale x 4 x half>, i32 } @vploadff_nxv4f16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: 
ret + %load = call { <vscale x 4 x half>, i32 } @llvm.vp.load.ff.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x half>, i32 } %load +} + +define { <vscale x 4 x half>, i32 } @vploadff_nxv4f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x half>, i32 } @llvm.vp.load.ff.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x half>, i32 } %load +} + +define { <vscale x 8 x half>, i32 } @vploadff_nxv8f16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x half>, i32 } @llvm.vp.load.ff.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x half>, i32 } %load +} + +define { <vscale x 8 x half>, i32 } @vploadff_nxv8f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x half>, i32 } @llvm.vp.load.ff.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x half>, i32 } %load +} + +define { <vscale x 16 x half>, i32 } @vploadff_nxv16f16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x half>, i32 } @llvm.vp.load.ff.nxv16f16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x half>, i32 } %load +} + +define { <vscale x 16 x half>, i32 } @vploadff_nxv16f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x half>, i32 } @llvm.vp.load.ff.nxv16f16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x half>, i32 } %load +} + +define { <vscale x 32 x half>, i32 } @vploadff_nxv32f16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x half>, i32 } @llvm.vp.load.ff.nxv32f16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl) + ret { <vscale x 32 x half>, i32 } %load +} + +define { <vscale x 32 x half>, i32 } @vploadff_nxv32f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x half>, i32 } @llvm.vp.load.ff.nxv32f16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 32 x half>, i32 } %load +} + +define { <vscale x 1 x float>, i32 } @vploadff_nxv1f32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vploadff_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x float>, i32 } @llvm.vp.load.ff.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x float>, i32 } %load +} + +define { <vscale x 1 x float>, i32 } @vploadff_nxv1f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x float>, i32 } @llvm.vp.load.ff.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x float>, i32 } %load +} + +define { <vscale x 2 x float>, i32 } @vploadff_nxv2f32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x float>, i32 } @llvm.vp.load.ff.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x float>, i32 } %load +} + +define { <vscale x 2 x float>, i32 } @vploadff_nxv2f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x float>, i32 } @llvm.vp.load.ff.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x float>, i32 } %load +} + +define { <vscale x 4 x float>, i32 } @vploadff_nxv4f32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x float>, i32 } @llvm.vp.load.ff.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x float>, i32 } %load +} + +define { <vscale x 4 x float>, i32 } @vploadff_nxv4f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x float>, i32 } @llvm.vp.load.ff.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x float>, i32 } %load +} + +define { <vscale x 8 x float>, i32 } @vploadff_nxv8f32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x float>, i32 } @llvm.vp.load.ff.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x float>, i32 } %load +} + +define { <vscale x 8 x float>, i32 } @vploadff_nxv8f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x float>, i32 } @llvm.vp.load.ff.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale 
x 8 x float>, i32 } %load +} + +define { <vscale x 16 x float>, i32 } @vploadff_nxv16f32(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x float>, i32 } @llvm.vp.load.ff.nxv16f32.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x float>, i32 } %load +} + +define { <vscale x 16 x float>, i32 } @vploadff_nxv16f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x float>, i32 } @llvm.vp.load.ff.nxv16f32.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x float>, i32 } %load +} + +define { <vscale x 1 x double>, i32 } @vploadff_nxv1f64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x double>, i32 } @llvm.vp.load.ff.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x double>, i32 } %load +} + +define { <vscale x 1 x double>, i32 } @vploadff_nxv1f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x double>, i32 } @llvm.vp.load.ff.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x double>, i32 } %load +} + +define { <vscale x 2 x double>, i32 } @vploadff_nxv2f64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x double>, i32 } @llvm.vp.load.ff.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x double>, i32 } %load +} + +define { <vscale x 2 x double>, i32 } @vploadff_nxv2f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x double>, i32 } @llvm.vp.load.ff.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x double>, i32 } %load +} + +define { <vscale x 4 x double>, i32 } @vploadff_nxv4f64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x double>, i32 } @llvm.vp.load.ff.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x double>, i32 } %load +} + +define { <vscale x 4 x double>, i32 } @vploadff_nxv4f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; 
CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x double>, i32 } @llvm.vp.load.ff.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x double>, i32 } %load +} + +define { <vscale x 8 x double>, i32 } @vploadff_nxv8f64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x double>, i32 } @llvm.vp.load.ff.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x double>, i32 } %load +} + +define { <vscale x 8 x double>, i32 } @vploadff_nxv8f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x double>, i32 } @llvm.vp.load.ff.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x double>, i32 } %load +} + +define { <vscale x 1 x bfloat>, i32 } @vploadff_nxv1bf16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x bfloat>, i32 } @llvm.vp.load.ff.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x bfloat>, i32 } %load +} + +define { <vscale x 1 x bfloat>, i32 } @vploadff_nxv1bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x bfloat>, i32 } @llvm.vp.load.ff.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x bfloat>, i32 } %load +} + +define { <vscale x 2 x bfloat>, i32 } @vploadff_nxv2bf16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x bfloat>, i32 } @llvm.vp.load.ff.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x bfloat>, i32 } %load +} + +define { <vscale x 2 x bfloat>, i32 } @vploadff_nxv2bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x bfloat>, i32 } @llvm.vp.load.ff.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x bfloat>, i32 } %load +} + +define { <vscale x 4 x bfloat>, i32 } @vploadff_nxv4bf16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x bfloat>, i32 } @llvm.vp.load.ff.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x bfloat>, i32 } %load +} + +define { <vscale x 4 x bfloat>, i32 } 
@vploadff_nxv4bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x bfloat>, i32 } @llvm.vp.load.ff.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x bfloat>, i32 } %load +} + +define { <vscale x 8 x bfloat>, i32 } @vploadff_nxv8bf16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x bfloat>, i32 } @llvm.vp.load.ff.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x bfloat>, i32 } %load +} + +define { <vscale x 8 x bfloat>, i32 } @vploadff_nxv8bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x bfloat>, i32 } @llvm.vp.load.ff.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x bfloat>, i32 } %load +} + +define { <vscale x 16 x bfloat>, i32 } @vploadff_nxv16bf16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x bfloat>, i32 } @llvm.vp.load.ff.nxv16bf16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x bfloat>, i32 } %load +} + +define { <vscale x 16 x bfloat>, i32 } @vploadff_nxv16bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x bfloat>, i32 } @llvm.vp.load.ff.nxv16bf16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x bfloat>, i32 } %load +} + +define { <vscale x 32 x bfloat>, i32 } @vploadff_nxv32bf16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x bfloat>, i32 } @llvm.vp.load.ff.nxv32bf16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl) + ret { <vscale x 32 x bfloat>, i32 } %load +} + +define { <vscale x 32 x bfloat>, i32 } @vploadff_nxv32bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x bfloat>, i32 } @llvm.vp.load.ff.nxv32bf16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 32 x bfloat>, i32 } %load +} + +define { <vscale x 3 x i8>, i32 } @vploadff_nxv3i8(ptr %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv3i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl 
+; CHECK-NEXT: ret + %load = call { <vscale x 3 x i8>, i32 } @llvm.vp.load.ff.nxv3i8.p0(ptr %ptr, <vscale x 3 x i1> %m, i32 %evl) + ret { <vscale x 3 x i8>, i32 } %load +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll index f29c74a..697c582 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll @@ -21,7 +21,7 @@ define <vscale x 4 x i32> @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32(<vscale x 4 x i3 ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma -; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: vsha2cl.vv v8, v10, v12 ; CHECK-NEXT: ret entry: %a = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32( @@ -45,7 +45,7 @@ define <vscale x 8 x i32> @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32(<vscale x 8 x i3 ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma -; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: vsha2cl.vv v8, v12, v16 ; CHECK-NEXT: ret entry: %a = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32( @@ -70,7 +70,7 @@ define <vscale x 16 x i32> @intrinsic_vsha2cl_vv_nxv16i32_nxv16i32(<vscale x 16 ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma -; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: vsha2cl.vv v8, v16, v24 ; CHECK-NEXT: ret entry: %a = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32( @@ -94,7 +94,7 @@ define <vscale x 4 x i64> @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64(<vscale x 4 x i6 ; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma -; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: vsha2cl.vv v8, v12, v16 ; CHECK-NEXT: ret entry: %a = call <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64( @@ -119,7 +119,7 @@ define <vscale x 8 x i64> @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64(<vscale x 8 x i6 ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma -; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: vsha2cl.vv v8, v16, v24 ; CHECK-NEXT: ret entry: %a = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll index c9c49e8..cb046cd 100644 --- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll +++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll @@ -204,18 +204,16 @@ define i64 @load_i64(ptr %p) { ; RV64IZBKB-NEXT: lbu a2, 5(a0) ; RV64IZBKB-NEXT: lbu a3, 6(a0) ; RV64IZBKB-NEXT: lbu a4, 7(a0) -; RV64IZBKB-NEXT: lbu a5, 0(a0) -; RV64IZBKB-NEXT: lbu a6, 1(a0) -; RV64IZBKB-NEXT: lbu a7, 2(a0) -; RV64IZBKB-NEXT: lbu a0, 3(a0) +; RV64IZBKB-NEXT: lbu a5, 1(a0) +; RV64IZBKB-NEXT: lbu a6, 2(a0) +; RV64IZBKB-NEXT: lbu a7, 3(a0) +; RV64IZBKB-NEXT: lbu a0, 0(a0) +; RV64IZBKB-NEXT: packh a3, a3, a4 ; RV64IZBKB-NEXT: packh a1, a1, a2 -; RV64IZBKB-NEXT: packh a2, a3, a4 -; RV64IZBKB-NEXT: packh a3, a5, a6 -; RV64IZBKB-NEXT: packh a0, a7, a0 -; RV64IZBKB-NEXT: slli a2, a2, 16 -; RV64IZBKB-NEXT: slli a0, a0, 16 -; RV64IZBKB-NEXT: or a1, a2, a1 -; RV64IZBKB-NEXT: or a0, a0, a3 +; RV64IZBKB-NEXT: packh a2, a6, a7 +; RV64IZBKB-NEXT: packh a0, a0, a5 +; RV64IZBKB-NEXT: packw a1, a1, a3 +; RV64IZBKB-NEXT: packw a0, a0, a2 ; RV64IZBKB-NEXT: pack a0, a0, a1 ; RV64IZBKB-NEXT: ret ; diff --git 
a/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll b/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll new file mode 100644 index 0000000..988bb6f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/xqcilsm-memset.ll @@ -0,0 +1,900 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefixes=RV32I +; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+experimental-xqcilsm < %s \ +; RUN: | FileCheck %s -check-prefixes=RV32IXQCILSM + +%struct.anon = type { [16 x i32] } +%struct.anon.0 = type { [47 x i32] } +%struct.anon.1 = type { [48 x i32] } +%struct.anon.2 = type { [64 x i8] } +%struct.struct1_t = type { [16 x i32] } + +@struct1 = common dso_local local_unnamed_addr global %struct.anon zeroinitializer, align 4 +@struct4b = common dso_local local_unnamed_addr global %struct.anon.0 zeroinitializer, align 4 +@struct4b1 = common dso_local local_unnamed_addr global %struct.anon.1 zeroinitializer, align 4 +@struct2 = common dso_local local_unnamed_addr global %struct.anon.2 zeroinitializer, align 1 +@arr1 = common dso_local local_unnamed_addr global [100 x i32] zeroinitializer, align 4 +@struct1_ = common dso_local local_unnamed_addr global %struct.struct1_t zeroinitializer, align 4 + +define void @test1(ptr nocapture %p, i32 %n) nounwind { +; RV32I-LABEL: test1: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: mv a2, a1 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test1: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: mv a2, a1 +; RV32IXQCILSM-NEXT: li a1, 0 +; RV32IXQCILSM-NEXT: tail memset +entry: + tail call void @llvm.memset.p0.i32(ptr align 1 %p, i8 0, i32 %n, i1 false) + ret void +} + +declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1) + +define void @test2(ptr nocapture %p) nounwind { +; RV32I-LABEL: test2: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 165 +; RV32I-NEXT: li a2, 128 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test2: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a1, 678490 +; RV32IXQCILSM-NEXT: addi a1, a1, 1445 +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 64(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 %p, i8 -91, i32 128, i1 false) + ret void +} + +define void @test2a(ptr nocapture %p) nounwind { +; RV32I-LABEL: test2a: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 165 +; RV32I-NEXT: li a2, 188 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test2a: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a1, 678490 +; RV32IXQCILSM-NEXT: addi a1, a1, 1445 +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 15, 64(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 124(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 %p, i8 -91, i32 188, i1 false) + ret void +} + +define void @test2b(ptr nocapture %p) nounwind { +; RV32I-LABEL: test2b: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 165 +; RV32I-NEXT: li a2, 192 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test2b: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: li a1, 165 +; RV32IXQCILSM-NEXT: li a2, 192 +; RV32IXQCILSM-NEXT: tail memset +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 %p, i8 -91, i32 192, i1 false) + ret void +} + +define void @test2c(ptr nocapture %p) nounwind { +; RV32I-LABEL: test2c: +; RV32I: # %bb.0: # %entry +; 
RV32I-NEXT: li a1, 165 +; RV32I-NEXT: li a2, 128 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test2c: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a1, 678490 +; RV32IXQCILSM-NEXT: addi a1, a1, 1445 +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 64(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 %p, i8 -91, i32 128, i1 false) + ret void +} + +define void @test2d(ptr nocapture %p) nounwind { +; RV32I-LABEL: test2d: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, -91 +; RV32I-NEXT: lui a2, 1048570 +; RV32I-NEXT: lui a3, 678490 +; RV32I-NEXT: addi a2, a2, 1445 +; RV32I-NEXT: addi a3, a3, 1445 +; RV32I-NEXT: sw a3, 0(a0) +; RV32I-NEXT: sw a3, 4(a0) +; RV32I-NEXT: sh a2, 8(a0) +; RV32I-NEXT: sb a1, 10(a0) +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test2d: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: li a1, -91 +; RV32IXQCILSM-NEXT: lui a2, 1048570 +; RV32IXQCILSM-NEXT: lui a3, 678490 +; RV32IXQCILSM-NEXT: addi a2, a2, 1445 +; RV32IXQCILSM-NEXT: addi a3, a3, 1445 +; RV32IXQCILSM-NEXT: sw a3, 0(a0) +; RV32IXQCILSM-NEXT: sw a3, 4(a0) +; RV32IXQCILSM-NEXT: sh a2, 8(a0) +; RV32IXQCILSM-NEXT: sb a1, 10(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 %p, i8 -91, i32 11, i1 false) + ret void +} + + +define ptr @test3(ptr %p) nounwind { +; RV32I-LABEL: test3: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a2, 256 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test3: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: li a2, 256 +; RV32IXQCILSM-NEXT: li a1, 0 +; RV32IXQCILSM-NEXT: tail memset +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 %p, i8 0, i32 256, i1 false) + ret ptr %p +} + +define ptr @test3a(ptr %p) nounwind { +; RV32I-LABEL: test3a: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a2, 128 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test3a: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 64(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 %p, i8 0, i32 128, i1 false) + ret ptr %p +} + +define void @test4() nounwind { +; RV32I-LABEL: test4: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(struct1) +; RV32I-NEXT: addi a0, a0, %lo(struct1) +; RV32I-NEXT: li a2, 64 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test4: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(struct1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(struct1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @struct1, i8 0, i32 64, i1 false) + ret void +} + +define void @test4a(ptr nocapture %s) nounwind { +; RV32I-LABEL: test4a: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 166 +; RV32I-NEXT: li a2, 64 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test4a: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a1, 682602 +; RV32IXQCILSM-NEXT: addi a1, a1, 1702 +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 0(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 %s, i8 -90, i32 64, i1 false) + ret void +} + +declare void @llvm.lifetime.start.p0(i64, ptr nocapture) + +declare void @llvm.lifetime.end.p0(i64, ptr nocapture) + +define void @test4b() nounwind { +; RV32I-LABEL: test4b: +; RV32I: # %bb.0: # %entry +; 
RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: lui a0, %hi(struct4b) +; RV32I-NEXT: addi a0, a0, %lo(struct4b) +; RV32I-NEXT: li a2, 188 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call memset +; RV32I-NEXT: lui a0, %hi(struct4b1) +; RV32I-NEXT: addi a0, a0, %lo(struct4b1) +; RV32I-NEXT: li a2, 192 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test4b: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a1, %hi(struct4b) +; RV32IXQCILSM-NEXT: addi a1, a1, %lo(struct4b) +; RV32IXQCILSM-NEXT: lui a0, %hi(struct4b1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(struct4b1) +; RV32IXQCILSM-NEXT: li a2, 192 +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 15, 64(a1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 124(a1) +; RV32IXQCILSM-NEXT: li a1, 0 +; RV32IXQCILSM-NEXT: tail memset +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @struct4b, i8 0, i32 188, i1 false) + tail call void @llvm.memset.p0.i32(ptr align 4 @struct4b1, i8 0, i32 192, i1 false) + ret void +} + +define void @test5() nounwind { +; RV32I-LABEL: test5: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(struct2) +; RV32I-NEXT: addi a0, a0, %lo(struct2) +; RV32I-NEXT: li a2, 64 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test5: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(struct2) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(struct2) +; RV32IXQCILSM-NEXT: li a2, 64 +; RV32IXQCILSM-NEXT: li a1, 0 +; RV32IXQCILSM-NEXT: tail memset +entry: + tail call void @llvm.memset.p0.i32(ptr align 1 @struct2, i8 0, i32 64, i1 false) + ret void +} + +define i32 @test6() nounwind { +; RV32I-LABEL: test6: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw zero, 12(sp) +; RV32I-NEXT: li a0, 0 +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sw zero, 12(sp) +; RV32IXQCILSM-NEXT: li a0, 0 +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret +entry: + %x = alloca i32, align 4 + call void @llvm.memset.p0.i32(ptr align 4 %x, i8 0, i32 4, i1 false) + %0 = load i32, ptr %x, align 4 + ret i32 %0 +} + +define zeroext i8 @test6b_c() nounwind { +; RV32I-LABEL: test6b_c: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sb zero, 12(sp) +; RV32I-NEXT: lbu a0, 12(sp) +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6b_c: +; RV32IXQCILSM: # %bb.0: +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sb zero, 12(sp) +; RV32IXQCILSM-NEXT: lbu a0, 12(sp) +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret + %x = alloca i8, align 4 + call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %x) + call void @llvm.memset.p0.i32(ptr nonnull align 4 %x, i8 0, i32 1, i1 false) + %x.0.x.0. = load volatile i8, ptr %x, align 4 + call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %x) + ret i8 %x.0.x.0. 
+} + +define signext i16 @test6b_s() nounwind { +; RV32I-LABEL: test6b_s: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sh zero, 12(sp) +; RV32I-NEXT: lh a0, 12(sp) +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6b_s: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sh zero, 12(sp) +; RV32IXQCILSM-NEXT: lh a0, 12(sp) +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret +entry: + %x = alloca i16, align 4 + call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %x) + store i16 0, ptr %x, align 4 + %x.0.x.0. = load volatile i16, ptr %x, align 4 + call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %x) + ret i16 %x.0.x.0. +} + +define i32 @test6b_l() nounwind { +; RV32I-LABEL: test6b_l: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw zero, 12(sp) +; RV32I-NEXT: lw a0, 12(sp) +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6b_l: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sw zero, 12(sp) +; RV32IXQCILSM-NEXT: lw a0, 12(sp) +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret +entry: + %x = alloca i32, align 4 + call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x) + store i32 0, ptr %x, align 4 + %x.0.x.0. = load volatile i32, ptr %x, align 4 + call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) + ret i32 %x.0.x.0. +} + +define i64 @test6b_ll() nounwind { +; RV32I-LABEL: test6b_ll: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw zero, 8(sp) +; RV32I-NEXT: sw zero, 12(sp) +; RV32I-NEXT: lw a0, 8(sp) +; RV32I-NEXT: lw a1, 12(sp) +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6b_ll: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sw zero, 8(sp) +; RV32IXQCILSM-NEXT: sw zero, 12(sp) +; RV32IXQCILSM-NEXT: lw a0, 8(sp) +; RV32IXQCILSM-NEXT: lw a1, 12(sp) +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret +entry: + %x = alloca i64, align 8 + call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %x) + call void @llvm.memset.p0.i32(ptr nonnull align 8 %x, i8 0, i32 8, i1 false) + %x.0.x.0. = load volatile i64, ptr %x, align 8 + call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %x) + ret i64 %x.0.x.0. 
+} + +define zeroext i8 @test6c_c() nounwind { +; RV32I-LABEL: test6c_c: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sb zero, 15(sp) +; RV32I-NEXT: li a0, 0 +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6c_c: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sb zero, 15(sp) +; RV32IXQCILSM-NEXT: li a0, 0 +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret +entry: + %x = alloca i8 + call void @llvm.memset.p0.i32(ptr align 1 %x, i8 0, i32 1, i1 false) + %0 = load i8, ptr %x, align 1 + ret i8 %0 +} + +define signext i16 @test6c_s() nounwind { +; RV32I-LABEL: test6c_s: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sh zero, 14(sp) +; RV32I-NEXT: li a0, 0 +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6c_s: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sh zero, 14(sp) +; RV32IXQCILSM-NEXT: li a0, 0 +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret +entry: + %x = alloca i16 + call void @llvm.memset.p0.i32(ptr align 2 %x, i8 0, i32 2, i1 false) + %0 = load i16, ptr %x, align 2 + ret i16 %0 +} + +define i32 @test6c_l() nounwind { +; RV32I-LABEL: test6c_l: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw zero, 12(sp) +; RV32I-NEXT: li a0, 0 +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6c_l: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sw zero, 12(sp) +; RV32IXQCILSM-NEXT: li a0, 0 +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret +entry: + %x = alloca i32, align 4 + call void @llvm.memset.p0.i32(ptr align 4 %x, i8 0, i32 4, i1 false) + %0 = load i32, ptr %x, align 4 + ret i32 %0 +} + +define i64 @test6c_ll() nounwind { +; RV32I-LABEL: test6c_ll: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw zero, 8(sp) +; RV32I-NEXT: sw zero, 12(sp) +; RV32I-NEXT: li a0, 0 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test6c_ll: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: addi sp, sp, -16 +; RV32IXQCILSM-NEXT: sw zero, 8(sp) +; RV32IXQCILSM-NEXT: sw zero, 12(sp) +; RV32IXQCILSM-NEXT: li a0, 0 +; RV32IXQCILSM-NEXT: li a1, 0 +; RV32IXQCILSM-NEXT: addi sp, sp, 16 +; RV32IXQCILSM-NEXT: ret +entry: + %x = alloca i64, align 8 + call void @llvm.memset.p0.i32(ptr align 8 %x, i8 0, i32 8, i1 false) + %0 = load i64, ptr %x, align 8 + ret i64 %0 +} + +define void @test7() nounwind { +; RV32I-LABEL: test7: +; RV32I: # %bb.0: +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: sw zero, %lo(arr1)(a0) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: sw zero, 4(a0) +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test7: +; RV32IXQCILSM: # %bb.0: +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: sw zero, %lo(arr1)(a0) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: sw zero, 4(a0) +; RV32IXQCILSM-NEXT: ret + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 8, i1 false) + ret void +} + +define void @test7a() nounwind { +; RV32I-LABEL: test7a: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test7a: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: ret +entry: + call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 0, i1 false) + ret void +} + +define void @test7a_unalign() nounwind { +; 
RV32I-LABEL: test7a_unalign: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: li a1, -1 +; RV32I-NEXT: sw a1, %lo(arr1)(a0) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: sw a1, 4(a0) +; RV32I-NEXT: sw a1, 8(a0) +; RV32I-NEXT: sw a1, 12(a0) +; RV32I-NEXT: sb a1, 16(a0) +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test7a_unalign: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: li a1, -1 +; RV32IXQCILSM-NEXT: sw a1, %lo(arr1)(a0) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: sw a1, 4(a0) +; RV32IXQCILSM-NEXT: sw a1, 8(a0) +; RV32IXQCILSM-NEXT: sw a1, 12(a0) +; RV32IXQCILSM-NEXT: sb a1, 16(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 -1, i32 17, i1 false) + ret void +} + +define void @test7b() nounwind { +; RV32I-LABEL: test7b: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a1, 255 +; RV32I-NEXT: li a2, 68 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test7b: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: li a1, -1 +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 1, 64(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 -1, i32 68, i1 false) + ret void +} + +define void @test7c() nounwind { +; RV32I-LABEL: test7c: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a1, 128 +; RV32I-NEXT: li a2, 128 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test7c: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: lui a1, 526344 +; RV32IXQCILSM-NEXT: addi a1, a1, 128 +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 64(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 -128, i32 128, i1 false) + ret void +} + +define void @test7d() nounwind { +; RV32I-LABEL: test7d: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a1, 13 +; RV32I-NEXT: li a2, 148 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test7d: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: lui a1, 53457 +; RV32IXQCILSM-NEXT: addi a1, a1, -755 +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 15, 64(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 6, 124(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 13, i32 148, i1 false) + ret void +} + +define void @test7e() nounwind { +; RV32I-LABEL: test7e: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a1, 239 +; RV32I-NEXT: li a2, 100 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test7e: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: lui a1, 982783 +; RV32IXQCILSM-NEXT: addi a1, a1, -17 +; RV32IXQCILSM-NEXT: qc.setwmi a1, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi a1, 9, 64(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, 
i8 -17, i32 100, i1 false) + ret void +} + +define void @test8() nounwind { +; RV32I-LABEL: test8: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: sw zero, %lo(arr1)(a0) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: sw zero, 4(a0) +; RV32I-NEXT: sw zero, 8(a0) +; RV32I-NEXT: sw zero, 12(a0) +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test8: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: sw zero, %lo(arr1)(a0) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: sw zero, 4(a0) +; RV32IXQCILSM-NEXT: sw zero, 8(a0) +; RV32IXQCILSM-NEXT: sw zero, 12(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 16, i1 false) + ret void +} + +define void @test9() nounwind { +; RV32I-LABEL: test9: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: sw zero, %lo(arr1)(a0) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: sw zero, 20(a0) +; RV32I-NEXT: sw zero, 24(a0) +; RV32I-NEXT: sw zero, 28(a0) +; RV32I-NEXT: sw zero, 4(a0) +; RV32I-NEXT: sw zero, 8(a0) +; RV32I-NEXT: sw zero, 12(a0) +; RV32I-NEXT: sw zero, 16(a0) +; RV32I-NEXT: ret +; +; RV32IXQCILSM-LABEL: test9: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: sw zero, %lo(arr1)(a0) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: sw zero, 20(a0) +; RV32IXQCILSM-NEXT: sw zero, 24(a0) +; RV32IXQCILSM-NEXT: sw zero, 28(a0) +; RV32IXQCILSM-NEXT: sw zero, 4(a0) +; RV32IXQCILSM-NEXT: sw zero, 8(a0) +; RV32IXQCILSM-NEXT: sw zero, 12(a0) +; RV32IXQCILSM-NEXT: sw zero, 16(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 32, i1 false) + ret void +} + +define void @test10() nounwind { +; RV32I-LABEL: test10: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a2, 60 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test10: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 15, 0(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 60, i1 false) + ret void +} + +define void @test11() nounwind { +; RV32I-LABEL: test11: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a2, 64 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test11: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 64, i1 false) + ret void +} + +define void @test12() nounwind { +; RV32I-LABEL: test12: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a2, 120 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test12: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 14, 64(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 120, i1 false) + ret void +} + 
+define void @test13() nounwind { +; RV32I-LABEL: test13: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a2, 124 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test13: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 15, 64(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 124, i1 false) + ret void +} + +define void @test14() nounwind { +; RV32I-LABEL: test14: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a2, 180 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test14: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 15, 64(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 14, 124(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 180, i1 false) + ret void +} + +define void @test15() nounwind { +; RV32I-LABEL: test15: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a2, 184 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test15: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 15, 64(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 15, 124(a0) +; RV32IXQCILSM-NEXT: ret +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 184, i1 false) + ret void +} + +define void @test15a() nounwind { +; RV32I-LABEL: test15a: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a1, 165 +; RV32I-NEXT: li a2, 192 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test15a: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: li a1, 165 +; RV32IXQCILSM-NEXT: li a2, 192 +; RV32IXQCILSM-NEXT: tail memset +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 -91, i32 192, i1 false) + ret void +} + +define void @test15b() nounwind { +; RV32I-LABEL: test15b: +; RV32I: # %bb.0: +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a2, 188 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test15b: +; RV32IXQCILSM: # %bb.0: +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 0(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 15, 64(a0) +; RV32IXQCILSM-NEXT: qc.setwmi zero, 16, 124(a0) +; RV32IXQCILSM-NEXT: ret + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 188, i1 false) + ret void +} + +define void @test15c() nounwind { +; RV32I-LABEL: test15c: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a0, %hi(arr1) +; RV32I-NEXT: addi a0, a0, %lo(arr1) +; RV32I-NEXT: li a2, 192 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: tail memset +; +; RV32IXQCILSM-LABEL: test15c: +; RV32IXQCILSM: # %bb.0: # %entry +; RV32IXQCILSM-NEXT: lui a0, %hi(arr1) +; 
RV32IXQCILSM-NEXT: addi a0, a0, %lo(arr1) +; RV32IXQCILSM-NEXT: li a2, 192 +; RV32IXQCILSM-NEXT: li a1, 0 +; RV32IXQCILSM-NEXT: tail memset +entry: + tail call void @llvm.memset.p0.i32(ptr align 4 @arr1, i8 0, i32 192, i1 false) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/GlobalVarAddrspace.ll b/llvm/test/CodeGen/SPIRV/GlobalVarAddrspace.ll new file mode 100644 index 0000000..2bccfde --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/GlobalVarAddrspace.ll @@ -0,0 +1,23 @@ +; This test case checks that LLVM -> SPIR-V translation produces a valid +; SPIR-V module, where a global variable defined with a non-default +; address space has the correct non-function storage class. +; +; No checks are needed beyond the plain translation to SPIR-V. If the +; translation is wrong, validation of the newly produced SPIR-V module +; fails: spirv-val detects the problematic SPIR-V code emitted by the +; translator and reports the following error: +; +; "Variables can not have a function[7] storage class outside of a function". +; +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: %[[#Ptr:]] = OpTypePointer CrossWorkgroup %[[#]] +; CHECK: %[[#]] = OpVariable %[[#Ptr]] CrossWorkgroup %[[#]] + +@G = addrspace(1) global i1 true + +define spir_func i1 @f(i1 %0) { + store i1 %0, ptr addrspace(1) @G, align 1 + ret i1 %0 +} diff --git a/llvm/test/CodeGen/SPIRV/SamplerArgNonKernel.ll b/llvm/test/CodeGen/SPIRV/SamplerArgNonKernel.ll new file mode 100644 index 0000000..5b3a5d8 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/SamplerArgNonKernel.ll @@ -0,0 +1,37 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +;CHECK: OpEntryPoint Kernel %[[#KernelId:]] +;CHECK: %[[#image2d_t:]] = OpTypeImage +;CHECK: %[[#sampler_t:]] = OpTypeSampler +;CHECK: %[[#sampled_image_t:]] = OpTypeSampledImage + +define spir_func float @test(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %Img, target("spirv.Sampler") %Smp) { +;CHECK-NOT: %[[#KernelId]] = OpFunction %[[#]] +;CHECK: OpFunction +;CHECK: %[[#image:]] = OpFunctionParameter %[[#image2d_t]] +;CHECK: %[[#sampler:]] = OpFunctionParameter %[[#sampler_t]] +entry: + %call = call spir_func <4 x i32> @_Z11read_imagef11ocl_image2d11ocl_samplerDv2_i(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %Img, target("spirv.Sampler") %Smp, <2 x i32> zeroinitializer) +;CHECK: %[[#sampled_image:]] = OpSampledImage %[[#sampled_image_t]] %[[#image]] %[[#sampler]] +;CHECK: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#sampled_image]] %[[#]] Lod %[[#]] + + %0 = extractelement <4 x i32> %call, i32 0 + %conv = sitofp i32 %0 to float + ret float %conv +} + +declare spir_func <4 x i32> @_Z11read_imagef11ocl_image2d11ocl_samplerDv2_i(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), i32, <2 x i32>) + +define spir_kernel void @test2(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %Img, target("spirv.Sampler") %Smp, ptr addrspace(1) %result) { +;CHECK: %[[#KernelId]] = OpFunction %[[#]] +entry: + %call = call spir_func float @test(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %Img, 
target("spirv.Sampler") %Smp) + %0 = load float, ptr addrspace(1) %result, align 4 + %add = fadd float %0, %call + store float %add, ptr addrspace(1) %result, align 4 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/SpecConstants/spec-constant-length-array.ll b/llvm/test/CodeGen/SPIRV/SpecConstants/spec-constant-length-array.ll new file mode 100644 index 0000000..fccddd7 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/SpecConstants/spec-constant-length-array.ll @@ -0,0 +1,56 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpCapability VariableLengthArrayINTEL +; CHECK: OpExtension "SPV_INTEL_variable_length_array" + +; CHECK-DAG: OpDecorate %[[#]] SpecId 0 +; CHECK-DAG: OpDecorate %[[#]] SpecId 1 +; CHECK-DAG: OpDecorate %[[#]] SpecId 2 +; CHECK-DAG: OpDecorate %[[#A0:]] Alignment 4 +; CHECK-DAG: OpDecorate %[[#A1:]] Alignment 2 +; CHECK-DAG: OpDecorate %[[#A2:]] Alignment 16 + +; CHECK: %[[#VOID_TY:]] = OpTypeVoid +; CHECK: %[[#FUNC_TY:]] = OpTypeFunction %[[#VOID_TY]] +; CHECK-DAG: %[[#I64:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#I32:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#I8:]] = OpTypeInt 8 0 +; CHECK-DAG: %[[#F64:]] = OpTypeFloat 64 +; CHECK-DAG: %[[#STRUCT_TY:]] = OpTypeStruct %[[#F64]] %[[#F64]] +; CHECK-DAG: %[[#PTR_STRUCT:]] = OpTypePointer Function %[[#STRUCT_TY]] +; CHECK-DAG: %[[#PTR_I8:]] = OpTypePointer Function %[[#I8]] +; CHECK-DAG: %[[#F32:]] = OpTypeFloat 32 +; CHECK-DAG: %[[#PTR_F32:]] = OpTypePointer Function %[[#F32]] + +; CHECK-DAG: %[[#SC0:]] = OpSpecConstant %[[#I64]] 1 +; CHECK-DAG: %[[#SC1:]] = OpSpecConstant %[[#I32]] 2 +; CHECK-DAG: %[[#SC2:]] = OpSpecConstant %[[#I8]] 4 + +; CHECK: %[[#]] = OpFunction %[[#VOID_TY]] None %[[#FUNC_TY]] +; CHECK: %[[#LABEL:]] = OpLabel + +; CHECK: %[[#A0]] = OpVariableLengthArrayINTEL %[[#PTR_F32]] %[[#SC0]] +; CHECK: %[[#A1]] = OpVariableLengthArrayINTEL %[[#PTR_I8]] %[[#SC1]] +; CHECK: %[[#A2]] = OpVariableLengthArrayINTEL %[[#PTR_STRUCT]] %[[#SC2]] + +%struct_type = type { double, double } + +define spir_kernel void @test() { + entry: + %length0 = call i64 @_Z20__spirv_SpecConstantix(i32 0, i64 1), !SYCL_SPEC_CONST_SYM_ID !0 + %length1 = call i32 @_Z20__spirv_SpecConstantii(i32 1, i32 2), !SYCL_SPEC_CONST_SYM_ID !1 + %length2 = call i8 @_Z20__spirv_SpecConstantic(i32 2, i8 4), !SYCL_SPEC_CONST_SYM_ID !2 + %scla0 = alloca float, i64 %length0, align 4 + %scla1 = alloca i8, i32 %length1, align 2 + %scla2 = alloca %struct_type, i8 %length2, align 16 + ret void +} + +declare i8 @_Z20__spirv_SpecConstantic(i32, i8) +declare i32 @_Z20__spirv_SpecConstantii(i32, i32) +declare i64 @_Z20__spirv_SpecConstantix(i32, i64) + +!0 = !{!"i64_spec_const", i32 0} +!1 = !{!"i32_spec_const", i32 1} +!2 = !{!"i8_spec_const", i32 2} diff --git a/llvm/test/CodeGen/SPIRV/align-duplicate.ll b/llvm/test/CodeGen/SPIRV/align-duplicate.ll new file mode 100644 index 0000000..8a8d8ae --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/align-duplicate.ll @@ -0,0 +1,16 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Test that duplicate align information does not result in SPIR-V validation +; errors due to duplicate Alignment Decorations. 
+ +;CHECK: OpDecorate %[[#Var:]] Alignment +;CHECK: %[[#Var]] = OpVariable %[[#]] + +define spir_func void @f() { + %res = alloca i16, align 2, !spirv.Decorations !1 + ret void +} + +!1 = !{!2} +!2 = !{i32 44, i32 2} diff --git a/llvm/test/CodeGen/SPIRV/array_type.ll b/llvm/test/CodeGen/SPIRV/array_type.ll new file mode 100644 index 0000000..251b48f --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/array_type.ll @@ -0,0 +1,78 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64v1.2-unknown-unknown %s -o - -filetype=obj | spirv-val --target-env opencl2.2 %} + +; CHECK: OpCapability Kernel +; CHECK-NOT: OpCapability Shader +; CHECK-DAG: %[[#float16:]] = OpTypeFloat 16 +; CHECK-DAG: %[[#SyclHalfTy:]] = OpTypeStruct %[[#float16]] +; CHECK-DAG: %[[#i16:]] = OpTypeInt 16 +; CHECK-DAG: %[[#i32:]] = OpTypeInt 32 +; CHECK-DAG: %[[#i64:]] = OpTypeInt 64 +; CHECK-DAG: %[[#ConstNull:]] = OpConstantNull %[[#i64]] +; CHECK-DAG: %[[#ConstOne:]] = OpConstant %[[#i64]] 1 +; CHECK-DAG: %[[#ConstFive:]] = OpConstant %[[#i16]] 5 +; CHECK-DAG: %[[#SyclHalfTyPtr:]] = OpTypePointer Function %[[#SyclHalfTy]] +; CHECK-DAG: %[[#i32Ptr:]] = OpTypePointer Function %[[#i32]] +; CHECK-DAG: %[[#StorePtrTy:]] = OpTypePointer Function %[[#i16]] + +%"class.sycl::_V1::detail::half_impl::half" = type { half } + +; Function Attrs: mustprogress norecurse nounwind +define spir_kernel void @foo(ptr %p){ +; CHECK: OpFunction +; CHECK: %[[#Ptr:]] = OpFunctionParameter +; CHECK: OpLabel +; CHECK: %[[#BitcastOp:]] = OpInBoundsPtrAccessChain %[[#SyclHalfTyPtr]] %[[#Ptr]] %[[#ConstNull]] %[[#ConstNull]] +; CHECK: %[[#StorePtr:]] = OpBitcast %[[#StorePtrTy]] %[[#BitcastOp]] +; CHECK: OpStore %[[#StorePtr]] %[[#ConstFive]] +; CHECK: OpReturn +entry: + %0 = getelementptr inbounds [0 x [32 x %"class.sycl::_V1::detail::half_impl::half"]], ptr %p, i64 0, i64 0, i64 0 + store i16 5, ptr %0 + ret void +} + +; Function Attrs: mustprogress norecurse nounwind +define spir_kernel void @foo2(ptr %p){ +; CHECK: OpFunction +; CHECK: %[[#Ptr:]] = OpFunctionParameter +; CHECK: OpLabel +; CHECK: %[[#BitcastOp:]] = OpInBoundsPtrAccessChain %[[#SyclHalfTyPtr]] %[[#Ptr]] %[[#ConstOne]] %[[#ConstOne]] +; CHECK: %[[#StorePtr:]] = OpBitcast %[[#StorePtrTy]] %[[#BitcastOp]] +; CHECK: OpStore %[[#StorePtr]] %[[#ConstFive]] +; CHECK: OpReturn +entry: + %0 = getelementptr inbounds [0 x [32 x %"class.sycl::_V1::detail::half_impl::half"]], ptr %p, i64 0, i64 1, i64 1 + store i16 5, ptr %0 + ret void +} + +; Function Attrs: mustprogress norecurse nounwind +define spir_kernel void @foo3(ptr %p){ +; CHECK: OpFunction +; CHECK: %[[#Ptr:]] = OpFunctionParameter +; CHECK: OpLabel +; CHECK: %[[#BitcastOp:]] = OpInBoundsPtrAccessChain %[[#i32Ptr]] %[[#Ptr]] %[[#ConstNull]] %[[#ConstNull]] +; CHECK: %[[#StorePtr:]] = OpBitcast %[[#StorePtrTy]] %[[#BitcastOp]] +; CHECK: OpStore %[[#StorePtr]] %[[#ConstFive]] +; CHECK: OpReturn +entry: + %0 = getelementptr inbounds [0 x [32 x i32]], ptr %p, i64 0, i64 0, i64 0 + store i16 5, ptr %0 + ret void +} + +; Function Attrs: mustprogress norecurse nounwind +define spir_kernel void @foo4(ptr %p){ +; CHECK: OpFunction +; CHECK: %[[#Ptr:]] = OpFunctionParameter +; CHECK: OpLabel +; CHECK: %[[#BitcastOp:]] = OpInBoundsPtrAccessChain %[[#i32Ptr]] %[[#Ptr]] %[[#ConstOne]] %[[#ConstOne]] +; CHECK: %[[#StorePtr:]] = OpBitcast %[[#StorePtrTy]] %[[#BitcastOp]] +; CHECK: OpStore %[[#StorePtr]] %[[#ConstFive]] +; CHECK: OpReturn +entry: + %0 = getelementptr 
inbounds [0 x [32 x i32]], ptr %p, i64 0, i64 1, i64 1 + store i16 5, ptr %0 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/duplicate-types.ll b/llvm/test/CodeGen/SPIRV/duplicate-types.ll new file mode 100644 index 0000000..df1ae04 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/duplicate-types.ll @@ -0,0 +1,16 @@ +; Check that we don't end up with duplicated array types in TypeMap. +; No FileCheck needed, we only want to check the absence of errors. +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: %[[#]] = OpTypeArray %[[#]] %[[#]] +; CHECK-NOT: OpTypeArray + +%duplicate = type { [2 x ptr addrspace(4)] } + +define spir_kernel void @foo() { +entry: + alloca [2 x ptr addrspace(4)], align 8 + alloca %duplicate, align 8 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/entry-point-interfaces.ll b/llvm/test/CodeGen/SPIRV/entry-point-interfaces.ll new file mode 100644 index 0000000..f1e0927 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/entry-point-interfaces.ll @@ -0,0 +1,31 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpEntryPoint Kernel %[[#Func:]] "test" %[[#Interface1:]] %[[#Interface2:]] %[[#Interface3:]] %[[#Interface4:]] +; CHECK-DAG: OpName %[[#Func]] "test" +; CHECK-DAG: OpName %[[#Interface1]] "var" +; CHECK-DAG: OpName %[[#Interface3]] "var2" +; CHECK-DAG: OpName %[[#Interface2]] "var.const" +; CHECK-DAG: OpName %[[#Interface4]] "var2.const" +; CHECK-DAG: %[[#TypeInt:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#Const1:]] = OpConstant %[[#TypeInt]] 1 +; CHECK-DAG: %[[#Const2:]] = OpConstant %[[#TypeInt]] 3 + +; CHECK: %[[#Interface1]] = OpVariable %[[#]] UniformConstant %[[#Const1]] +; CHECK: %[[#Interface3]] = OpVariable %[[#]] UniformConstant %[[#Const2]] +; CHECK: %[[#Interface2]] = OpVariable %[[#]] UniformConstant %[[#Const1]] +; CHECK: %[[#Interface4]] = OpVariable %[[#]] UniformConstant %[[#Const2]] + +@var = dso_local addrspace(2) constant i32 1, align 4 +@var2 = dso_local addrspace(2) constant i32 3, align 4 +@var.const = private unnamed_addr addrspace(2) constant i32 1, align 4 +@var2.const = private unnamed_addr addrspace(2) constant i32 3, align 4 + +define dso_local spir_kernel void @test() { +entry: + %0 = load i32, ptr addrspace(2) @var.const, align 4 + %1 = load i32, ptr addrspace(2) @var2.const, align 4 + %mul = mul nsw i32 %0, %1 + %mul1 = mul nsw i32 %mul, 2 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll index 40e2aff..7adb039 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll @@ -1,18 +1,11 @@ ; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s --spirv-ext=+SPV_KHR_bit_instructions -o - | FileCheck %s --check-prefix=CHECK-EXTENSION -; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-NO-EXTENSION +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64v1.2-unknown-unknown %s --spirv-ext=+SPV_KHR_bit_instructions -o - -filetype=obj | spirv-val --target-env opencl2.2 %} ; CHECK-EXTENSION: OpCapability BitInstructions ; CHECK-EXTENSION-NEXT: OpExtension "SPV_KHR_bit_instructions" ; 
CHECK-EXTENSION-NOT: OpCabilitity Shader -; CHECK-NO-EXTENSION: OpCapability Shader -; CHECK-NO-EXTENSION-NOT: OpCabilitity BitInstructions -; CHECK-NO-EXTENSION-NOT: OpExtension "SPV_KHR_bit_instructions" - - ; CHECK-EXTENSION: %[[#int:]] = OpTypeInt 32 ; CHECK-EXTENSION: OpBitReverse %[[#int]] -; CHECK-NO-EXTENSION: %[[#int:]] = OpTypeInt 32 -; CHECK-NO-EXTENSION: OpBitReverse %[[#int]] define spir_kernel void @testBitRev(i32 %a, i32 %b, i32 %c, i32 addrspace(1)* nocapture %res) local_unnamed_addr { entry: diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions/cl_khr_extended_bit_ops_spv-friendly_only.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions/cl_khr_extended_bit_ops_spv-friendly_only.ll index 65cccc8..3bd1bd6 100644 --- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions/cl_khr_extended_bit_ops_spv-friendly_only.ll +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions/cl_khr_extended_bit_ops_spv-friendly_only.ll @@ -1,12 +1,8 @@ ; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s --spirv-ext=+SPV_KHR_bit_instructions -o - | FileCheck %s --check-prefix=CHECK-EXTENSION -; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-NO-EXTENSION -; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s --spirv-ext=+SPV_KHR_bit_instructions -o - -filetype=obj | spirv-val %} +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64v1.2-unknown-unknown %s --spirv-ext=+SPV_KHR_bit_instructions -o - -filetype=obj | spirv-val --target-env opencl2.2 %} ; ; CHECK-EXTENSION: Capability BitInstructions ; CHECK-EXTENSION: Extension "SPV_KHR_bit_instructions" -; CHECK-NO-EXTENSION-NOT: Capability BitInstructions -; CHECK-NO-EXTENSION-NOT: Extension "SPV_KHR_bit_instructions" -; CHECK-NO-EXTENSION: Capability Shader ; ; CHECK-EXTENSION: %[[#]] = OpFunction %[[#]] None %[[#]] ; CHECK-EXTENSION: %[[#reversebase:]] = OpFunctionParameter %[[#]] @@ -15,24 +11,11 @@ ; kernel void testBitReverse_SPIRVFriendly(long4 b, global long4 *res) { ; *res = bit_reverse(b); ; } -define spir_kernel void @testBitReverse_SPIRVFriendly(<4 x i64> %b, ptr addrspace(1) nocapture align 32 %res) #3 { +define spir_kernel void @testBitReverse_SPIRVFriendly(<4 x i64> %b, ptr addrspace(1) %res) { entry: %call = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %b) - store <4 x i64> %call, ptr addrspace(1) %res, align 32 + store <4 x i64> %call, ptr addrspace(1) %res ret void } -declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) #4 - - -attributes #3 = { nounwind } -attributes #4 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } - -!llvm.module.flags = !{!0} -!opencl.ocl.version = !{!1} -!opencl.spir.version = !{!1} -!llvm.ident = !{!2} - -!0 = !{i32 1, !"wchar_size", i32 4} -!1 = !{i32 2, i32 0} -!2 = !{!"clang version 20.0.0git (https://github.com/llvm/llvm-project.git cc61409d353a40f62d3a137f3c7436aa00df779d)"} +declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions/cl_khr_extended_bit_ops_spv-friendly_only_no_extension.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions/cl_khr_extended_bit_ops_spv-friendly_only_no_extension.ll new file mode 100644 index 0000000..61ef273 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions/cl_khr_extended_bit_ops_spv-friendly_only_no_extension.ll @@ -0,0 +1,16 @@ +; RUN: llc -verify-machineinstrs -O0 
-mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-NO-EXTENSION +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} +; +; CHECK-NO-EXTENSION-NOT: Capability BitInstructions +; CHECK-NO-EXTENSION-NOT: Extension "SPV_KHR_bit_instructions" +; CHECK-NO-EXTENSION: Capability Shader + +define internal spir_func void @testBitReverse_SPIRVFriendly() #3 { +entry: + %call = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> <i64 1, i64 2, i64 3, i64 4>) + ret void +} + +declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) + +attributes #3 = { nounwind "hlsl.shader"="compute" } diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions_no_extension.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions_no_extension.ll new file mode 100644 index 0000000..452df0a --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions_no_extension.ll @@ -0,0 +1,23 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s --check-prefix=CHECK-NO-EXTENSION +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %} + + +; CHECK-NO-EXTENSION: OpCapability Shader +; CHECK-NO-EXTENSION-NOT: OpCapability BitInstructions +; CHECK-NO-EXTENSION-NOT: OpExtension "SPV_KHR_bit_instructions" +; CHECK-NO-EXTENSION: %[[#int:]] = OpTypeInt 32 +; CHECK-NO-EXTENSION: OpBitReverse %[[#int]] + +define hidden spir_func void @testBitRev(i32 %a, i32 %b, i32 %c, ptr %res) local_unnamed_addr { +entry: + %call = tail call i32 @llvm.bitreverse.i32(i32 %b) + store i32 %call, ptr %res, align 4 + ret void +} + +define void @main() #1 { + ret void +} + +declare i32 @llvm.bitreverse.i32(i32) +attributes #1 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } diff --git a/llvm/test/CodeGen/SPIRV/get_global_size.ll b/llvm/test/CodeGen/SPIRV/get_global_size.ll new file mode 100644 index 0000000..959371a7 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/get_global_size.ll @@ -0,0 +1,50 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: %[[#int32:]] = OpTypeInt 32 0 +; CHECK: %[[#int64:]] = OpTypeInt 64 0 +; CHECK: %[[#vec3:]] = OpTypeVector %[[#int64]] 3 +; CHECK: %[[#ptr_input_vec3:]] = OpTypePointer Input %[[#vec3]] +; CHECK: %[[#global_size_var:]] = OpVariable %[[#ptr_input_vec3]] Input + +; CHECK: %[[#load_gs1:]] = OpLoad %[[#vec3]] %[[#global_size_var]] Aligned 1 +; CHECK: %[[#extract3:]] = OpCompositeExtract %[[#int64]] %[[#load_gs1]] 0 + +; CHECK: %[[#bitcast1:]] = OpBitcast %[[#]] %[[#]] +; CHECK: %[[#load_out1:]] = OpLoad %[[#]] %[[#bitcast1]] Aligned 8 +; CHECK: %[[#gep1:]] = OpInBoundsPtrAccessChain %[[#]] %[[#load_out1]] %[[#]] +; CHECK: OpStore %[[#gep1]] %[[#extract3]] Aligned 8 + +; CHECK: %[[#load_param_x:]] = OpLoad %[[#int32]] %[[#]] +; CHECK: %[[#load_gs2:]] = OpLoad %[[#vec3]] %[[#global_size_var]] Aligned 1 +; CHECK: %[[#dyn_extract:]] = OpVectorExtractDynamic %[[#int64]] %[[#load_gs2]] %[[#load_param_x]] +; CHECK: %[[#cmp:]] = OpULessThan %[[#]] %[[#load_param_x]] %[[#]] +; CHECK: %[[#select2:]] = OpSelect %[[#int64]] %[[#cmp]] %[[#dyn_extract]] %[[#]] +; CHECK: %[[#bitcast2:]] = OpBitcast %[[#]] %[[#]] +; CHECK: %[[#load_out2:]] = OpLoad %[[#]] %[[#bitcast2]] Aligned 8 +; CHECK: %[[#gep2:]] = OpInBoundsPtrAccessChain %[[#]] 
%[[#load_out2]] %[[#]] +; CHECK: OpStore %[[#gep2]] %[[#select2]] Aligned 8 + +define dso_local spir_kernel void @ggs(ptr noundef align 8 %out, i32 noundef %x) { +entry: + %out.addr = alloca ptr, align 8 + %x.addr = alloca i32, align 4 + store ptr %out, ptr %out.addr, align 8 + store i32 %x, ptr %x.addr, align 4 + %call = call i64 @_Z15get_global_sizej(i32 noundef 0) + %0 = load ptr, ptr %out.addr, align 8 + %arrayidx = getelementptr inbounds i64, ptr %0, i64 0 + store i64 %call, ptr %arrayidx, align 8 + %call1 = call i64 @_Z15get_global_sizej(i32 noundef 3) + %1 = load ptr, ptr %out.addr, align 8 + %arrayidx2 = getelementptr inbounds i64, ptr %1, i64 1 + store i64 %call1, ptr %arrayidx2, align 8 + %2 = load i32, ptr %x.addr, align 4 + %call3 = call i64 @_Z15get_global_sizej(i32 noundef %2) + %3 = load ptr, ptr %out.addr, align 8 + %arrayidx4 = getelementptr inbounds i64, ptr %3, i64 2 + store i64 %call3, ptr %arrayidx4, align 8 + ret void +} + +declare i64 @_Z15get_global_sizej(i32 noundef) diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll new file mode 100644 index 0000000..00e9185 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll @@ -0,0 +1,75 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-vulkan1.3-library %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-vulkan1.3-library %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %} + +@.str = private unnamed_addr constant [2 x i8] c"b\00", align 1 +@.str.2 = private unnamed_addr constant [2 x i8] c"c\00", align 1 +@.str.4 = private unnamed_addr constant [2 x i8] c"d\00", align 1 +@.str.6 = private unnamed_addr constant [2 x i8] c"e\00", align 1 +@.str.8 = private unnamed_addr constant [2 x i8] c"f\00", align 1 +@.str.10 = private unnamed_addr constant [2 x i8] c"g\00", align 1 +@.str.12 = private unnamed_addr constant [2 x i8] c"h\00", align 1 +@.str.14 = private unnamed_addr constant [2 x i8] c"i\00", align 1 + +; CHECK-DAG: OpName [[b:%[0-9]+]] "b" +; CHECK-DAG: OpName [[c:%[0-9]+]] "c" +; CHECK-DAG: OpName [[d:%[0-9]+]] "d" +; CHECK-DAG: OpName [[e:%[0-9]+]] "e" +; CHECK-DAG: OpName [[f:%[0-9]+]] "f" +; CHECK-DAG: OpName [[g:%[0-9]+]] "g" +; CHECK-DAG: OpName [[h:%[0-9]+]] "h" +; CHECK-DAG: OpName [[i:%[0-9]+]] "i" +; CHECK-DAG: OpDecorate [[b]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[b]] Binding 1 +; CHECK-DAG: OpDecorate [[c]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[c]] Binding 0 +; CHECK-DAG: OpDecorate [[d]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[d]] Binding 3 +; CHECK-DAG: OpDecorate [[e]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[e]] Binding 2 +; CHECK-DAG: OpDecorate [[f]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[f]] Binding 1 +; CHECK-DAG: OpDecorate [[g]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[g]] Binding 0 +; CHECK-DAG: OpDecorate [[h]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[h]] Binding 3 +; CHECK-DAG: OpDecorate [[i]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[i]] Binding 2 + + +define void @main() local_unnamed_addr #0 { +entry: + %0 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str) + %1 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.2) + %2 = tail call 
target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 1, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.4) + %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.6) + %4 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str.8) + %5 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 2, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.10) + %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.12) + %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.14) + %8 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0) + %9 = load i32, ptr addrspace(11) %8, align 4 + %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0) + %11 = load i32, ptr addrspace(11) %10, align 4 + %add.i = add nsw i32 %11, %9 + %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0) + %13 = load i32, ptr addrspace(11) %12, align 4 + %add4.i = add nsw i32 %add.i, %13 + %14 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0) + %15 = load i32, ptr addrspace(11) %14, align 4 + %add6.i = add nsw i32 %add4.i, %15 + %16 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0) + %17 = load i32, ptr addrspace(11) %16, align 4 + %add8.i = add nsw i32 %add6.i, %17 + %18 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0) + %19 = load i32, ptr addrspace(11) %18, align 4 + %add10.i = add nsw i32 %add8.i, %19 + %20 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0) + %21 = load i32, ptr addrspace(11) %20, align 4 + %add12.i = add nsw i32 %add10.i, %21 + %22 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0) + store i32 %add12.i, ptr addrspace(11) %22, align 4 + ret void +} + + +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
\ No newline at end of file diff --git a/llvm/test/CodeGen/SPIRV/layout.ll b/llvm/test/CodeGen/SPIRV/layout.ll new file mode 100644 index 0000000..94fa432 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/layout.ll @@ -0,0 +1,84 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpCapability Kernel +; CHECK: OpCapability Addresses +; CHECK: OpCapability GenericPointer +; CHECK: OpCapability Int64 +; CHECK: OpCapability Int8 +; CHECK: OpCapability Linkage + +; CHECK: OpExtInstImport "OpenCL.std" +; CHECK: OpMemoryModel Physical64 OpenCL +; CHECK: OpEntryPoint Kernel %[[#]] "foo" %[[#]] +; CHECK: OpSource OpenCL_C 200000 + +; CHECK-DAG: OpName %[[#]] +; CHECK-DAG: OpDecorate %[[#]] + + +; CHECK: %[[#I8:]] = OpTypeInt 8 0 +; CHECK: %[[#PTR_CW_I8:]] = OpTypePointer CrossWorkgroup %[[#I8]] +; CHECK: %[[#I32:]] = OpTypeInt 32 0 +; CHECK: %[[#VEC4:]] = OpTypeVector %[[#I32]] 4 +; CHECK: %[[#VOID:]] = OpTypeVoid +; CHECK: %[[#FUNC_TYPE0:]] = OpTypeFunction %[[#VOID]] %[[#PTR_CW_I8]] %[[#VEC4]] +; CHECK: %[[#FUNC_TYPE1:]] = OpTypeFunction %[[#VOID]] %[[#PTR_CW_I8]] +; CHECK: %[[#VEC3:]] = OpTypeVector %[[#I32]] 3 +; CHECK: %[[#FUNC_TYPE2:]] = OpTypeFunction %[[#VOID]] %[[#PTR_CW_I8]] %[[#VEC3]] +; CHECK: %[[#PTR_GEN_I8:]] = OpTypePointer Generic %[[#I8]] +; CHECK: %[[#STRUCT_B:]] = OpTypeStruct %[[#I32]] %[[#PTR_GEN_I8]] +; CHECK: %[[#STRUCT_C:]] = OpTypeStruct %[[#I32]] %[[#STRUCT_B]] +; CHECK: %[[#STRUCT_A:]] = OpTypeStruct %[[#I32]] %[[#STRUCT_C]] +; CHECK: %[[#F32:]] = OpTypeFloat 32 +; CHECK: %[[#CONST_2:]] = OpConstant %[[#I32]] 2 +; CHECK: %[[#ARRAY_F:]] = OpTypeArray %[[#F32]] %[[#CONST_2]] +; CHECK: %[[#ARRAY_I:]] = OpTypeArray %[[#I32]] %[[#CONST_2]] +; CHECK: %[[#PTR_CW_STRUCT_A:]] = OpTypePointer CrossWorkgroup %[[#STRUCT_A]] +; CHECK: %[[#PTR_UC_VEC4:]] = OpTypePointer UniformConstant %[[#VEC4]] +; CHECK: %[[#PTR_UC_ARRAY_F:]] = OpTypePointer UniformConstant %[[#ARRAY_F]] +; CHECK: %[[#PTR_CW_PTR_CW_I8:]] = OpTypePointer CrossWorkgroup %[[#PTR_CW_I8]] +; CHECK: %[[#I64:]] = OpTypeInt 64 0 +; CHECK: %[[#PTR_CW_ARRAY_I:]] = OpTypePointer CrossWorkgroup %[[#ARRAY_I]] + +; CHECK: %[[#NULL_I32:]] = OpConstantNull %[[#I32]] +; CHECK: %[[#CONST_I64_4:]] = OpConstant %[[#I64]] 4 +; CHECK: %[[#CONST_I32_1:]] = OpConstant %[[#I32]] 1 +; CHECK: %[[#COMP_I32:]] = OpConstantComposite %[[#ARRAY_I]] %[[#CONST_I32_1]] %[[#CONST_2]] + +; CHECK: %[[#VAR_V:]] = OpVariable %[[#PTR_CW_ARRAY_I]] CrossWorkgroup %[[#COMP_I32]] +; CHECK: %[[#SPECCONSTOP:]] = OpSpecConstantOp %[[#PTR_CW_I8]] InBoundsPtrAccessChain %[[#VAR_V]] %[[#NULL_I32]] %[[#CONST_I64_4]] +; CHECK: %[[#VAR_S:]] = OpVariable %[[#PTR_CW_PTR_CW_I8]] CrossWorkgroup %[[#SPECCONSTOP]] +; CHECK: %[[#NULL_ARRAY_F:]] = OpConstantNull %[[#ARRAY_F]] +; CHECK: %[[#VAR_F:]] = OpVariable %[[#PTR_UC_ARRAY_F]] UniformConstant %[[#NULL_ARRAY_F]] +; CHECK: %[[#NULL_STRUCT_A:]] = OpConstantNull %[[#STRUCT_A]] +; CHECK: %[[#VAR_A:]] = OpVariable %[[#PTR_CW_STRUCT_A]] CrossWorkgroup %[[#NULL_STRUCT_A]] + +; CHECK: %[[#FN_BAR1:]] = OpFunction %[[#VOID]] None %[[#FUNC_TYPE1]] +; CHECK: %[[#P_BAR1:]] = OpFunctionParameter %[[#PTR_CW_I8]] +; CHECK: OpFunctionEnd + +@v = addrspace(1) global [2 x i32] [i32 1, i32 2], align 4 +@s = addrspace(1) global ptr addrspace(1) getelementptr inbounds ([2 x i32], ptr addrspace(1) @v, i32 0, i32 1), align 4 + +%struct.A = type { i32, %struct.C } +%struct.C = type { i32, 
%struct.B } +%struct.B = type { i32, ptr addrspace(4) } + +@f = addrspace(2) constant [2 x float] zeroinitializer, align 4 +@b = external addrspace(2) constant <4 x i32> +@a = common addrspace(1) global %struct.A zeroinitializer, align 4 + +define spir_kernel void @foo(ptr addrspace(1) %a, <4 x i32> %vec_in) { +entry: + call spir_func void @bar1(ptr addrspace(1) %a) + %extractVec = shufflevector <4 x i32> %vec_in, <4 x i32> %vec_in, <3 x i32> <i32 0, i32 1, i32 2> + call spir_func void @bar2(ptr addrspace(1) %a, <3 x i32> %extractVec) + ret void +} + +declare spir_func void @bar1(ptr addrspace(1)) +declare spir_func void @bar2(ptr addrspace(1), <3 x i32>) + +!opencl.ocl.version = !{!7} +!7 = !{i32 2, i32 0} diff --git a/llvm/test/CodeGen/SPIRV/lit.local.cfg b/llvm/test/CodeGen/SPIRV/lit.local.cfg index f139d13..5179542 100644 --- a/llvm/test/CodeGen/SPIRV/lit.local.cfg +++ b/llvm/test/CodeGen/SPIRV/lit.local.cfg @@ -1,16 +1,6 @@ if not "SPIRV" in config.root.targets: config.unsupported = True -spirv_sim_root = os.path.join(config.llvm_src_root, "utils", "spirv-sim") - -config.substitutions.append( - ( - "spirv-sim", - "'%s' %s" % (config.python_executable, - os.path.join(spirv_sim_root, "spirv-sim.py")), - ) -) - if config.spirv_tools_tests: config.available_features.add("spirv-tools") config.substitutions.append(("spirv-dis", os.path.join(config.llvm_tools_dir, "spirv-dis"))) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll new file mode 100644 index 0000000..438fff6 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll @@ -0,0 +1,92 @@ +;; Check that llvm.bitreverse.* intrinsics are lowered for +;; 2/4-bit scalar and vector types. 
+ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers,+SPV_KHR_bit_instructions %s -o - | FileCheck %s +; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers,+SPV_KHR_bit_instructions %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpCapability ArbitraryPrecisionIntegersINTEL +; CHECK: OpExtension "SPV_INTEL_arbitrary_precision_integers" + +; CHECK: %[[#I4:]] = OpTypeInt 4 0 +; CHECK: %[[#I2:]] = OpTypeInt 2 0 +; CHECK: %[[#Z4:]] = OpConstantNull %[[#I4]] +; CHECK: %[[#Z2:]] = OpConstantNull %[[#I2]] +; CHECK: %[[#V2I2:]] = OpTypeVector %[[#I2]] 2 +; CHECK: %[[#V2I4:]] = OpTypeVector %[[#I4]] 2 +; CHECK: %[[#V3I2:]] = OpTypeVector %[[#I2]] 3 +; CHECK: %[[#V3I4:]] = OpTypeVector %[[#I4]] 3 +; CHECK: %[[#V4I2:]] = OpTypeVector %[[#I2]] 4 +; CHECK: %[[#V4I4:]] = OpTypeVector %[[#I4]] 4 +; CHECK: %[[#V8I2:]] = OpTypeVector %[[#I2]] 8 +; CHECK: %[[#V8I4:]] = OpTypeVector %[[#I4]] 8 +; CHECK: %[[#V16I2:]] = OpTypeVector %[[#I2]] 16 +; CHECK: %[[#V16I4:]] = OpTypeVector %[[#I4]] 16 + + +; CHECK: %[[#]] = OpBitReverse %[[#I2]] %[[#Z2]] +; CHECK: %[[#]] = OpBitReverse %[[#I4]] %[[#Z4]] +; CHECK: %[[#]] = OpBitReverse %[[#V2I2]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V2I4]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V3I2]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V3I4]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V4I2]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V4I4]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V8I2]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V8I4]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V16I2]] %[[#]] +; CHECK: %[[#]] = OpBitReverse %[[#V16I4]] %[[#]] + +define spir_kernel void @testBitRev() { +entry: + %call2 = call i2 @llvm.bitreverse.i2(i2 0) + %call4 = call i4 @llvm.bitreverse.i4(i4 0) + ret void +} + +define spir_kernel void @testBitRevV2(<2 x i2> %a, <2 x i4> %b) { +entry: + %call2 = call <2 x i2> @llvm.bitreverse.v2i2(<2 x i2> %a) + %call4 = call <2 x i4> @llvm.bitreverse.v2i4(<2 x i4> %b) + ret void +} + +define spir_kernel void @testBitRevV3(<3 x i2> %a, <3 x i4> %b) { +entry: + %call2 = call <3 x i2> @llvm.bitreverse.v3i2(<3 x i2> %a) + %call4 = call <3 x i4> @llvm.bitreverse.v3i4(<3 x i4> %b) + ret void +} + +define spir_kernel void @testBitRevV4(<4 x i2> %a, <4 x i4> %b) { +entry: + %call2 = call <4 x i2> @llvm.bitreverse.v4i2(<4 x i2> %a) + %call4 = call <4 x i4> @llvm.bitreverse.v4i4(<4 x i4> %b) + ret void +} + +define spir_kernel void @testBitRevV8(<8 x i2> %a, <8 x i4> %b) { +entry: + %call2 = call <8 x i2> @llvm.bitreverse.v8i2(<8 x i2> %a) + %call4 = call <8 x i4> @llvm.bitreverse.v8i4(<8 x i4> %b) + ret void +} + +define spir_kernel void @testBitRevV16(<16 x i2> %a, <16 x i4> %b) { +entry: + %call2 = call <16 x i2> @llvm.bitreverse.v16i2(<16 x i2> %a) + %call4 = call <16 x i4> @llvm.bitreverse.v16i4(<16 x i4> %b) + ret void +} + +declare i2 @llvm.bitreverse.i2(i2) +declare i4 @llvm.bitreverse.i4(i4) +declare <2 x i2> @llvm.bitreverse.v2i2(<2 x i2>) +declare <2 x i4> @llvm.bitreverse.v2i4(<2 x i4>) +declare <3 x i2> @llvm.bitreverse.v3i2(<3 x i2>) +declare <3 x i4> @llvm.bitreverse.v3i4(<3 x i4>) +declare <4 x i2> @llvm.bitreverse.v4i2(<4 x i2>) +declare <4 x i4> @llvm.bitreverse.v4i4(<4 x i4>) +declare <8 x i2> @llvm.bitreverse.v8i2(<8 x i2>) +declare <8 x i4> @llvm.bitreverse.v8i4(<8 x i4>) +declare <16 x i2> @llvm.bitreverse.v16i2(<16 x i2>) +declare <16 x i4> @llvm.bitreverse.v16i4(<16 x i4>) diff --git 
a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fp-to-int-intrinsics.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fp-to-int-intrinsics.ll new file mode 100644 index 0000000..66c744f --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fp-to-int-intrinsics.ll @@ -0,0 +1,196 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpDecorate %[[#SAT1:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT2:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT3:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT4:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT5:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT6:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT7:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT8:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT9:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT10:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT11:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT12:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT13:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT14:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT15:]] SaturatedConversion +; CHECK: OpDecorate %[[#SAT16:]] SaturatedConversion + + +; CHECK: %[[#SAT1]] = OpConvertFToS %[[#]] %[[#]] +define spir_kernel void @testfunction_float_to_signed_i8(float %input) { +entry: + %ptr = alloca i8 + %signed_int = call i8 @llvm.fptosi.sat.i8.f32(float %input) + store i8 %signed_int, i8* %ptr + ret void + +} +declare i8 @llvm.fptosi.sat.i8.f32(float) + + +; CHECK: %[[#SAT2]] = OpConvertFToS %[[#]] %[[#]] +define spir_kernel void @testfunction_float_to_signed_i16(float %input) { +entry: + %ptr = alloca i16 + %signed_int = call i16 @llvm.fptosi.sat.i16.f32(float %input) + store i16 %signed_int, i16* %ptr + ret void + +} +declare i16 @llvm.fptosi.sat.i16.f32(float) + +; CHECK: %[[#SAT3]] = OpConvertFToS %[[#]] %[[#]] +define spir_kernel void @testfunction_float_to_signed_i32(float %input) { +entry: + %ptr = alloca i32 + %signed_int = call i32 @llvm.fptosi.sat.i32.f32(float %input) + store i32 %signed_int, i32* %ptr + ret void + +} +declare i32 @llvm.fptosi.sat.i32.f32(float) + + +; CHECK: %[[#SAT4]] = OpConvertFToS %[[#]] %[[#]] +define spir_kernel void @testfunction_float_to_signed_i64(float %input) { +entry: + %ptr = alloca i64 + %signed_int = call i64 @llvm.fptosi.sat.i64.f32(float %input) + store i64 %signed_int, i64* %ptr + ret void +} +declare i64 @llvm.fptosi.sat.i64.f32(float) + + +; CHECK: %[[#SAT5]] = OpConvertFToS %[[#]] %[[#]] +define spir_kernel void @testfunction_double_to_signed_i8(double %input) { +entry: + %ptr = alloca i8 + %signed_int = call i8 @llvm.fptosi.sat.i8.f64(double %input) + store i8 %signed_int, i8* %ptr + ret void +} +declare i8 @llvm.fptosi.sat.i8.f64(double) + + +; CHECK: %[[#SAT6]] = OpConvertFToS %[[#]] %[[#]] +define spir_kernel void @testfunction_double_to_signed_i16(double %input) { +entry: + %ptr = alloca i16 + %signed_int = call i16 @llvm.fptosi.sat.i16.f64(double %input) + store i16 %signed_int, i16* %ptr + ret void +} +declare i16 @llvm.fptosi.sat.i16.f64(double) + + +; CHECK: %[[#SAT7]] = OpConvertFToS %[[#]] %[[#]] +define spir_kernel void @testfunction_double_to_signed_i32(double %input) { +entry: + %ptr = alloca i32 + %signed_int = call i32 @llvm.fptosi.sat.i32.f64(double %input) + store i32 %signed_int, i32* %ptr + ret void +} +declare i32 @llvm.fptosi.sat.i32.f64(double) + + +; 
CHECK: %[[#SAT8]] = OpConvertFToS %[[#]] %[[#]] +define spir_kernel void @testfunction_double_to_signed_i64(double %input) { +entry: + %ptr = alloca i64 + %signed_int = call i64 @llvm.fptosi.sat.i64.f64(double %input) + store i64 %signed_int, i64* %ptr + ret void +} +declare i64 @llvm.fptosi.sat.i64.f64(double) + +; CHECK: %[[#SAT9]] = OpConvertFToU %[[#]] %[[#]] +define spir_kernel void @testfunction_float_to_unsigned_i8(float %input) { +entry: + %ptr = alloca i8 + %unsigned_int = call i8 @llvm.fptoui.sat.i8.f32(float %input) + store i8 %unsigned_int, i8* %ptr + ret void +} +declare i8 @llvm.fptoui.sat.i8.f32(float) + + +; CHECK: %[[#SAT10]] = OpConvertFToU %[[#]] %[[#]] +define spir_kernel void @testfunction_float_to_unsigned_i16(float %input) { +entry: + %ptr = alloca i16 + %unsigned_int = call i16 @llvm.fptoui.sat.i16.f32(float %input) + store i16 %unsigned_int, i16* %ptr + ret void +} +declare i16 @llvm.fptoui.sat.i16.f32(float) + + +; CHECK: %[[#SAT11]] = OpConvertFToU %[[#]] %[[#]] +define spir_kernel void @testfunction_float_to_unsigned_i32(float %input) { +entry: + %ptr = alloca i32 + %unsigned_int = call i32 @llvm.fptoui.sat.i32.f32(float %input) + store i32 %unsigned_int, i32* %ptr + ret void +} +declare i32 @llvm.fptoui.sat.i32.f32(float) + + +; CHECK: %[[#SAT12]] = OpConvertFToU %[[#]] %[[#]] +define spir_kernel void @testfunction_float_to_unsigned_i64(float %input) { +entry: + %ptr = alloca i64 + %unsigned_int = call i64 @llvm.fptoui.sat.i64.f32(float %input) + store i64 %unsigned_int, i64* %ptr + ret void +} +declare i64 @llvm.fptoui.sat.i64.f32(float) + + +; CHECK: %[[#SAT13]] = OpConvertFToU %[[#]] %[[#]] +define spir_kernel void @testfunction_double_to_unsigned_i8(double %input) { +entry: + %ptr = alloca i8 + %unsigned_int = call i8 @llvm.fptoui.sat.i8.f64(double %input) + store i8 %unsigned_int, i8* %ptr + ret void +} +declare i8 @llvm.fptoui.sat.i8.f64(double) + + +; CHECK: %[[#SAT14]] = OpConvertFToU %[[#]] %[[#]] +define spir_kernel void @testfunction_double_to_unsigned_i16(double %input) { +entry: + %ptr = alloca i16 + %unsigned_int = call i16 @llvm.fptoui.sat.i16.f64(double %input) + store i16 %unsigned_int, i16* %ptr + ret void +} +declare i16 @llvm.fptoui.sat.i16.f64(double) + + +; CHECK: %[[#SAT15]] = OpConvertFToU %[[#]] %[[#]] +define spir_kernel void @testfunction_double_to_unsigned_i32(double %input) { +entry: + %ptr = alloca i32 + %unsigned_int = call i32 @llvm.fptoui.sat.i32.f64(double %input) + store i32 %unsigned_int, i32* %ptr + ret void +} +declare i32 @llvm.fptoui.sat.i32.f64(double) + + +; CHECK: %[[#SAT16]] = OpConvertFToU %[[#]] %[[#]] +define spir_kernel void @testfunction_double_to_unsigned_i64(double %input) { +entry: + %ptr = alloca i64 + %unsigned_int = call i64 @llvm.fptoui.sat.i64.f64(double %input) + store i64 %unsigned_int, i64* %ptr + ret void +} +declare i64 @llvm.fptoui.sat.i64.f64(double) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll index 9d07b63..483d707 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll @@ -18,7 +18,6 @@ ; CL: %[[#FooVar:]] = OpVariable ; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] ; CL-NEXT: OpLifetimeStart %[[#Casted1]], 72 -; CL-NEXT: OpCopyMemorySized ; CL-NEXT: OpBitcast ; CL-NEXT: OpInBoundsPtrAccessChain ; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] @@ -26,13 +25,11 @@ ; VK: OpFunction ; VK: %[[#FooVar:]] = OpVariable -; VK-NEXT: 
OpCopyMemorySized ; VK-NEXT: OpInBoundsAccessChain ; VK-NEXT: OpReturn define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel) - call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel) ret void @@ -41,20 +38,17 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) ; CL: OpFunction ; CL: %[[#BarVar:]] = OpVariable ; CL-NEXT: OpLifetimeStart %[[#BarVar]], 0 -; CL-NEXT: OpCopyMemorySized ; CL-NEXT: OpBitcast ; CL-NEXT: OpInBoundsPtrAccessChain ; CL-NEXT: OpLifetimeStop %[[#BarVar]], 0 ; VK: OpFunction ; VK: %[[#BarVar:]] = OpVariable -; VK-NEXT: OpCopyMemorySized ; VK-NEXT: OpInBoundsAccessChain ; VK-NEXT: OpReturn define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %RoundedRangeKernel) - call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %RoundedRangeKernel) ret void @@ -63,20 +57,17 @@ define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) ; CL: OpFunction ; CL: %[[#TestVar:]] = OpVariable ; CL-NEXT: OpLifetimeStart %[[#TestVar]], 1 -; CL-NEXT: OpCopyMemorySized ; CL-NEXT: OpInBoundsPtrAccessChain ; CL-NEXT: OpLifetimeStop %[[#TestVar]], 1 ; VK: OpFunction ; VK: %[[#Test:]] = OpVariable -; VK-NEXT: OpCopyMemorySized ; VK-NEXT: OpInBoundsAccessChain ; VK-NEXT: OpReturn define spir_func void @test(ptr noundef align 8 %_arg) { %var = alloca i8, align 8 call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %var) - call void @llvm.memcpy.p0.p0.i64(ptr align 8 %var, ptr align 8 %_arg, i64 1, i1 false) - %KernelFunc = getelementptr inbounds i8, ptr %var, i64 0 + %KernelFunc = getelementptr inbounds i8, ptr %var, i64 1 call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %var) ret void } diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memcpy.align.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memcpy.align.ll new file mode 100644 index 0000000..66a12b1 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memcpy.align.ll @@ -0,0 +1,54 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +%struct.B = type { [2 x i32] } +%struct.A = type { i64, %struct.B } + +@__const.foo.b = private unnamed_addr addrspace(2) constant %struct.B { [2 x i32] [i32 1, i32 2] }, align 4 +@__const.bar.a = private unnamed_addr addrspace(2) constant %struct.A { i64 0, %struct.B { [2 x i32] [i32 1, i32 2] } }, align 8 + +define spir_func void @foo(%struct.A* noalias sret(%struct.A) %agg.result) { +entry: + %b = alloca %struct.B, align 4 + %0 = bitcast %struct.B* %b to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* %0) + %1 = bitcast %struct.B* %b to i8* + call void @llvm.memcpy.p0i8.p2i8.i32(i8* align 4 %1, i8 addrspace(2)* align 4 bitcast (%struct.B addrspace(2)* @__const.foo.b to i8 addrspace(2)*), i32 8, i1 false) +; CHECK: OpCopyMemorySized %[[#]] %[[#]] 
%[[#]] Aligned 4 + %b1 = getelementptr inbounds %struct.A, %struct.A* %agg.result, i32 0, i32 1 + %2 = bitcast %struct.B* %b1 to i8* + %3 = bitcast %struct.B* %b to i8* + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %2, i8* align 4 %3, i32 8, i1 false) +; CHECK: %[[#PTR1:]] = OpInBoundsPtrAccessChain %[[#]] %[[#]] %[[#]] %[[#]] +; CHECK: OpCopyMemorySized %[[#PTR1]] %[[#]] %[[#]] Aligned 8 + %4 = bitcast %struct.B* %b to i8* + call void @llvm.lifetime.end.p0i8(i64 8, i8* %4) + ret void +} + +declare void @llvm.lifetime.start.p0i8(i64, i8* captures(none)) + +declare void @llvm.memcpy.p0i8.p2i8.i32(i8* captures(none) writeonly, i8 addrspace(2)* captures(none) readonly, i32, i1) + +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* captures(none) writeonly, i8* captures(none) readonly, i32, i1) + +declare void @llvm.lifetime.end.p0i8(i64, i8* captures(none)) + +define spir_func void @bar(%struct.B* noalias sret(%struct.B) %agg.result) { +entry: + %a = alloca %struct.A, align 8 + %0 = bitcast %struct.A* %a to i8* + call void @llvm.lifetime.start.p0i8(i64 16, i8* %0) + %1 = bitcast %struct.A* %a to i8* + call void @llvm.memcpy.p0i8.p2i8.i32(i8* align 8 %1, i8 addrspace(2)* align 8 bitcast (%struct.A addrspace(2)* @__const.bar.a to i8 addrspace(2)*), i32 16, i1 false) +; CHECK: OpCopyMemorySized %[[#]] %[[#]] %[[#]] Aligned 8 + %b = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1 + %2 = bitcast %struct.B* %agg.result to i8* + %3 = bitcast %struct.B* %b to i8* + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %2, i8* align 8 %3, i32 8, i1 false) +; CHECK: %[[#PTR2:]] = OpInBoundsPtrAccessChain %[[#]] %[[#]] %[[#]] %[[#]] +; CHECK: OpCopyMemorySized %[[#]] %[[#PTR2]] %[[#]] Aligned 4 + %4 = bitcast %struct.A* %a to i8* + call void @llvm.lifetime.end.p0i8(i64 16, i8* %4) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/tan.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/tan.ll new file mode 100644 index 0000000..dfb185da --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/tan.ll @@ -0,0 +1,21 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[#ext:]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: %[[#type_f32:]] = OpTypeFloat 32 +; CHECK-DAG: %[[#type_f64:]] = OpTypeFloat 64 +; CHECK: %[[#extinst_f32:]] = OpExtInst %[[#type_f32]] %[[#ext]] tan %[[#]] +; CHECK: %[[#extinst_f64:]] = OpExtInst %[[#type_f64]] %[[#ext]] tan %[[#]] + +define float @test_tan_f32(float %x) { + %res = call float @llvm.tan.f32(float %x) + ret float %res +} + +define double @test_tan_f64(double %x) { + %res = call double @llvm.tan.f64(double %x) + ret double %res +} + +declare float @llvm.tan.f32(float) +declare double @llvm.tan.f64(double) diff --git a/llvm/test/CodeGen/SPIRV/memory_model_md.ll b/llvm/test/CodeGen/SPIRV/memory_model_md.ll index e52343c..684a163 100644 --- a/llvm/test/CodeGen/SPIRV/memory_model_md.ll +++ b/llvm/test/CodeGen/SPIRV/memory_model_md.ll @@ -1,6 +1,7 @@ ; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=SPV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32v1.2-unknown-unknown %s -o - -filetype=obj | spirv-val --target-env opencl2.2 %} -; SPV: OpMemoryModel Physical32 Simple +; SPV: OpMemoryModel Physical32 OpenCL define dso_local dllexport void @k_no_fc(i32 %ibuf, i32 %obuf) local_unnamed_addr { entry: ret void @@ -8,4 +9,4 @@ entry: !spirv.MemoryModel = !{!0} 
-!0 = !{i32 1, i32 0} +!0 = !{i32 1, i32 2} diff --git a/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll b/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll index 544c657..19451d2 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/global-addrspacecast.ll @@ -1,5 +1,5 @@ -; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s -; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %} @PrivInternal = internal addrspace(10) global i32 456 ; CHECK-DAG: %[[#type:]] = OpTypeInt 32 0 @@ -7,7 +7,7 @@ ; CHECK-DAG: %[[#value:]] = OpConstant %[[#type]] 456 ; CHECK-DAG: %[[#var:]] = OpVariable %[[#ptrty]] Private %[[#value]] -define spir_kernel void @Foo() { +define hidden spir_func void @Foo() { %p = addrspacecast ptr addrspace(10) @PrivInternal to ptr %v = load i32, ptr %p, align 4 ret void @@ -15,3 +15,9 @@ define spir_kernel void @Foo() { ; CHECK-NEXT: OpLoad %[[#type]] %[[#var]] Aligned 4 ; CHECK-Next: OpReturn } + +define void @main() #1 { + ret void +} + +attributes #1 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } diff --git a/llvm/test/CodeGen/SPIRV/pointers/variables-storage-class-private.ll b/llvm/test/CodeGen/SPIRV/pointers/variables-storage-class-private.ll new file mode 100644 index 0000000..51db120 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/pointers/variables-storage-class-private.ll @@ -0,0 +1,21 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val --target-env vulkan1.3%} + +; CHECK-DAG: %[[#U32:]] = OpTypeInt 32 0 + +; CHECK-DAG: %[[#VAL:]] = OpConstant %[[#U32]] 456 +; CHECK-DAG: %[[#VTYPE:]] = OpTypePointer Private %[[#U32]] +; CHECK-DAG: %[[#]] = OpVariable %[[#VTYPE]] Private %[[#VAL]] +@PrivInternal = internal addrspace(10) global i32 456 + +define hidden spir_func void @Foo() { + %tmp = load i32, ptr addrspace(10) @PrivInternal + ret void +} + +define void @main() #1 { + ret void +} + +declare void @llvm.memcpy.p1.p2.i64(ptr addrspace(1) noalias nocapture writeonly, ptr addrspace(2) noalias nocapture readonly, i64, i1 immarg) +attributes #1 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } diff --git a/llvm/test/CodeGen/SPIRV/pointers/variables-storage-class.ll b/llvm/test/CodeGen/SPIRV/pointers/variables-storage-class.ll index a1ded05..6914f4f 100644 --- a/llvm/test/CodeGen/SPIRV/pointers/variables-storage-class.ll +++ b/llvm/test/CodeGen/SPIRV/pointers/variables-storage-class.ll @@ -1,5 +1,5 @@ ; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s -; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64v1.2-unknown-unknown %s -o - -filetype=obj | spirv-val --target-env opencl2.2 %} ; CHECK-DAG: %[[#U8:]] = OpTypeInt 8 0 ; CHECK-DAG: %[[#U32:]] = OpTypeInt 32 0 @@ -15,12 +15,7 @@ ; CHECK-DAG: %[[#INIT:]] = OpVariable %[[#VTYPE]] UniformConstant %[[#VAL]] @Init = private addrspace(2) constant i32 123 -; CHECK-DAG: %[[#VAL:]] = OpConstant %[[#U32]] 456 -; CHECK-DAG: %[[#VTYPE:]] = 
OpTypePointer Private %[[#U32]] -; CHECK-DAG: %[[#]] = OpVariable %[[#VTYPE]] Private %[[#VAL]] -@PrivInternal = internal addrspace(10) global i32 456 - -define spir_kernel void @Foo() { +define internal spir_func void @Foo() { ; CHECK: %[[#]] = OpLoad %[[#]] %[[#PTR]] Aligned 8 %l = load ptr addrspace(1), ptr addrspace(1) @Ptr, align 8 ; CHECK: OpCopyMemorySized %[[#]] %[[#INIT]] %[[#]] Aligned 4 diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_i32.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_i32.ll index f396b5a..838c551 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_i32.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_i32.ll @@ -1,13 +1,19 @@ -; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %} ; CHECK-SPIRV: %[[#int:]] = OpTypeInt 32 ; CHECK-SPIRV: OpBitReverse %[[#int]] -define spir_kernel void @testBitRev(i32 %a, i32 %b, i32 %c, i32 addrspace(1)* nocapture %res) local_unnamed_addr { +define hidden spir_func void @testBitRev(i32 %a, i32 %b, i32 %c, ptr %res) { entry: %call = tail call i32 @llvm.bitreverse.i32(i32 %b) - store i32 %call, i32 addrspace(1)* %res, align 4 + store i32 %call, ptr %res, align 4 + ret void +} + +define void @main() #1 { ret void } declare i32 @llvm.bitreverse.i32(i32) +attributes #1 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_v2i16.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_v2i16.ll deleted file mode 100644 index 8f04929..0000000 --- a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_v2i16.ll +++ /dev/null @@ -1,14 +0,0 @@ -; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV - -; CHECK-SPIRV: %[[#short:]] = OpTypeInt 16 -; CHECK-SPIRV: %[[#short2:]] = OpTypeVector %[[#short]] 2 -; CHECK-SPIRV: OpBitReverse %[[#short2]] - -define spir_kernel void @testBitRev(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> addrspace(1)* nocapture %res) local_unnamed_addr { -entry: - %call = tail call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %b) - store <2 x i16> %call, <2 x i16> addrspace(1)* %res, align 4 - ret void -} - -declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_v2i32.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_v2i32.ll new file mode 100644 index 0000000..3e2ed8b --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_v2i32.ll @@ -0,0 +1,20 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-vulkan %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %} + +; CHECK-SPIRV: %[[#short:]] = OpTypeInt 32 +; CHECK-SPIRV: %[[#short2:]] = OpTypeVector %[[#short]] 2 +; CHECK-SPIRV: OpBitReverse %[[#short2]] + +define hidden spir_func void @testBitRev(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, ptr %res) { +entry: + %call = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %b) + store <2 x i32> %call, ptr %res, align 4 + ret void +} + +define void @main() #1 { + ret void +} + +declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>) +attributes #1 = { "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" } diff --git 
a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/convert_functions.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/convert_functions.ll new file mode 100644 index 0000000..13a61b0 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/convert_functions.ll @@ -0,0 +1,56 @@ +; This test checks that functions with `convert_` prefix are translated as +; OpenCL builtins only in case they match the specification. Otherwise, we +; expect such functions to be translated to SPIR-V FunctionCall. + +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV: OpName %[[#Func:]] "_Z18convert_float_func" +; CHECK-SPIRV: OpName %[[#Func1:]] "_Z20convert_uint_satfunc" +; CHECK-SPIRV: OpName %[[#Func2:]] "_Z21convert_float_rtzfunc" +; CHECK-SPIRV-DAG: %[[#VoidTy:]] = OpTypeVoid +; CHECK-SPIRV-DAG: %[[#CharTy:]] = OpTypeInt 8 +; CHECK-SPIRV-DAG: %[[#FloatTy:]] = OpTypeFloat 32 + +; CHECK-SPIRV: %[[#Func]] = OpFunction %[[#VoidTy]] None %[[#]] +; CHECK-SPIRV: %[[#ConvertId1:]] = OpUConvert %[[#CharTy]] %[[#]] +; CHECK-SPIRV: %[[#ConvertId2:]] = OpConvertSToF %[[#FloatTy]] %[[#]] +; CHECK-SPIRV: %[[#]] = OpFunctionCall %[[#VoidTy]] %[[#Func]] %[[#ConvertId2]] +; CHECK-SPIRV: %[[#]] = OpFunctionCall %[[#VoidTy]] %[[#Func1]] %[[#]] +; CHECK-SPIRV: %[[#]] = OpFunctionCall %[[#VoidTy]] %[[#Func2]] %[[#ConvertId2]] +; CHECK-SPIRV-NOT: OpFConvert +; CHECK-SPIRV-NOT: OpConvertUToF + +define dso_local spir_func void @_Z18convert_float_func(float noundef %x) { +entry: + %x.addr = alloca float, align 4 + store float %x, ptr %x.addr, align 4 + ret void +} + +define dso_local spir_func void @_Z20convert_uint_satfunc(i32 noundef %x) { +entry: + ret void +} + +define dso_local spir_func void @_Z21convert_float_rtzfunc(float noundef %x) { +entry: + ret void +} + +define dso_local spir_func void @convert_int_bf16(i32 noundef %x) { +entry: + %x.addr = alloca i32, align 4 + store i32 %x, ptr %x.addr, align 4 + %0 = load i32, ptr %x.addr, align 4 + call spir_func signext i8 @_Z16convert_char_rtei(i32 noundef %0) + %call = call spir_func float @_Z13convert_floati(i32 noundef %0) + call spir_func void @_Z18convert_float_func(float noundef %call) + call spir_func void @_Z20convert_uint_satfunc(i32 noundef %0) + call spir_func void @_Z21convert_float_rtzfunc(float noundef %call) + ret void +} + +declare spir_func signext i8 @_Z16convert_char_rtei(i32 noundef) + +declare spir_func float @_Z13convert_floati(i32 noundef) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/nan.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/nan.ll new file mode 100644 index 0000000..1072f07 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/nan.ll @@ -0,0 +1,15 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Check OpenCL built-in nan translation. 
+ +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#]] %[[#]] nan %[[#]] + +define dso_local spir_kernel void @test(ptr addrspace(1) align 4 %a, i32 %b) { +entry: + %call = tail call spir_func float @_Z3nanj(i32 %b) + store float %call, ptr addrspace(1) %a, align 4 + ret void +} + +declare spir_func float @_Z3nanj(i32) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/shuffle.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/shuffle.ll new file mode 100644 index 0000000..aeca431 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/shuffle.ll @@ -0,0 +1,23 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Check OpenCL built-in shuffle and shuffle2 translation. + +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#]] %[[#]] shuffle %[[#]] %[[#]] +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#]] %[[#]] shuffle2 %[[#]] %[[#]] %[[#]] + +define spir_kernel void @test() { +entry: + %call = call spir_func <2 x float> @_Z7shuffleDv2_fDv2_j(<2 x float> zeroinitializer, <2 x i32> zeroinitializer) + ret void +} + +declare spir_func <2 x float> @_Z7shuffleDv2_fDv2_j(<2 x float>, <2 x i32>) + +define spir_kernel void @test2() { +entry: + %call = call spir_func <4 x float> @_Z8shuffle2Dv2_fS_Dv4_j(<2 x float> zeroinitializer, <2 x float> zeroinitializer, <4 x i32> zeroinitializer) + ret void +} + +declare spir_func <4 x float> @_Z8shuffle2Dv2_fS_Dv4_j(<2 x float>, <2 x float>, <4 x i32>) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/printf.ll b/llvm/test/CodeGen/SPIRV/transcoding/printf.ll new file mode 100644 index 0000000..338f0a5 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/printf.ll @@ -0,0 +1,14 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +@.str = private unnamed_addr addrspace(2) constant [12 x i8] c"Hello World\00", align 1 + +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#]] %[[#]] printf %[[#]] + +define dso_local spir_kernel void @BuiltinPrintf() { +entry: + %call = tail call i32 (ptr addrspace(2), ...) @printf(ptr addrspace(2) noundef @.str) + ret void +} + +declare noundef i32 @printf(ptr addrspace(2) nocapture noundef readonly, ...) diff --git a/llvm/test/CodeGen/SPIRV/zero-length-array.ll b/llvm/test/CodeGen/SPIRV/zero-length-array.ll new file mode 100644 index 0000000..666176c --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/zero-length-array.ll @@ -0,0 +1,11 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; CHECK: %[[#type:]] = OpTypeInt 32 0 +; CHECK: %[[#ext:]] = OpConstant %[[#type]] 0 + +define spir_func void @_Z3foov() { +entry: + %i = alloca [0 x i32], align 4 + ret void +} diff --git a/llvm/test/CodeGen/SystemZ/llvm.sincos.f16.ll b/llvm/test/CodeGen/SystemZ/llvm.sincos.f16.ll new file mode 100644 index 0000000..1163207 --- /dev/null +++ b/llvm/test/CodeGen/SystemZ/llvm.sincos.f16.ll @@ -0,0 +1,126 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=s390x-linux-gnu < %s | FileCheck -check-prefix=LINUX %s + +; FIXME: Merge with llvm.sincos.ll when zos is fixed for half. 
+ +define { half, half } @test_sincos_f16(half %a) #0 { +; LINUX-LABEL: test_sincos_f16: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: .cfi_offset %r14, -48 +; LINUX-NEXT: .cfi_offset %r15, -40 +; LINUX-NEXT: aghi %r15, -176 +; LINUX-NEXT: .cfi_def_cfa_offset 336 +; LINUX-NEXT: std %f8, 168(%r15) # 8-byte Spill +; LINUX-NEXT: .cfi_offset %f8, -168 +; LINUX-NEXT: brasl %r14, __extendhfsf2@PLT +; LINUX-NEXT: la %r2, 164(%r15) +; LINUX-NEXT: la %r3, 160(%r15) +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: le %f0, 164(%r15) +; LINUX-NEXT: brasl %r14, __truncsfhf2@PLT +; LINUX-NEXT: le %f1, 160(%r15) +; LINUX-NEXT: ler %f8, %f0 +; LINUX-NEXT: ler %f0, %f1 +; LINUX-NEXT: brasl %r14, __truncsfhf2@PLT +; LINUX-NEXT: ler %f2, %f0 +; LINUX-NEXT: ler %f0, %f8 +; LINUX-NEXT: ld %f8, 168(%r15) # 8-byte Reload +; LINUX-NEXT: lmg %r14, %r15, 288(%r15) +; LINUX-NEXT: br %r14 + %result = call { half, half } @llvm.sincos.f16(half %a) + ret { half, half } %result +} + +define half @test_sincos_f16_only_use_sin(half %a) #0 { +; LINUX-LABEL: test_sincos_f16_only_use_sin: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: .cfi_offset %r14, -48 +; LINUX-NEXT: .cfi_offset %r15, -40 +; LINUX-NEXT: aghi %r15, -168 +; LINUX-NEXT: .cfi_def_cfa_offset 328 +; LINUX-NEXT: brasl %r14, __extendhfsf2@PLT +; LINUX-NEXT: la %r2, 160(%r15) +; LINUX-NEXT: la %r3, 164(%r15) +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: le %f0, 160(%r15) +; LINUX-NEXT: brasl %r14, __truncsfhf2@PLT +; LINUX-NEXT: lmg %r14, %r15, 280(%r15) +; LINUX-NEXT: br %r14 + %result = call { half, half } @llvm.sincos.f16(half %a) + %result.0 = extractvalue { half, half } %result, 0 + ret half %result.0 +} + +define half @test_sincos_f16_only_use_cos(half %a) #0 { +; LINUX-LABEL: test_sincos_f16_only_use_cos: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: .cfi_offset %r14, -48 +; LINUX-NEXT: .cfi_offset %r15, -40 +; LINUX-NEXT: aghi %r15, -168 +; LINUX-NEXT: .cfi_def_cfa_offset 328 +; LINUX-NEXT: brasl %r14, __extendhfsf2@PLT +; LINUX-NEXT: la %r2, 164(%r15) +; LINUX-NEXT: la %r3, 160(%r15) +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: le %f0, 160(%r15) +; LINUX-NEXT: brasl %r14, __truncsfhf2@PLT +; LINUX-NEXT: lmg %r14, %r15, 280(%r15) +; LINUX-NEXT: br %r14 + %result = call { half, half } @llvm.sincos.f16(half %a) + %result.1 = extractvalue { half, half } %result, 1 + ret half %result.1 +} + +define { <2 x half>, <2 x half> } @test_sincos_v2f16(<2 x half> %a) #0 { +; LINUX-LABEL: test_sincos_v2f16: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: .cfi_offset %r14, -48 +; LINUX-NEXT: .cfi_offset %r15, -40 +; LINUX-NEXT: aghi %r15, -200 +; LINUX-NEXT: .cfi_def_cfa_offset 360 +; LINUX-NEXT: std %f8, 192(%r15) # 8-byte Spill +; LINUX-NEXT: std %f9, 184(%r15) # 8-byte Spill +; LINUX-NEXT: std %f10, 176(%r15) # 8-byte Spill +; LINUX-NEXT: .cfi_offset %f8, -168 +; LINUX-NEXT: .cfi_offset %f9, -176 +; LINUX-NEXT: .cfi_offset %f10, -184 +; LINUX-NEXT: ler %f8, %f2 +; LINUX-NEXT: brasl %r14, __extendhfsf2@PLT +; LINUX-NEXT: la %r2, 172(%r15) +; LINUX-NEXT: la %r3, 168(%r15) +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: ler %f0, %f8 +; LINUX-NEXT: brasl %r14, __extendhfsf2@PLT +; LINUX-NEXT: la %r2, 164(%r15) +; LINUX-NEXT: la %r3, 160(%r15) +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: le %f0, 172(%r15) +; LINUX-NEXT: brasl %r14, __truncsfhf2@PLT +; LINUX-NEXT: le %f1, 164(%r15) +; LINUX-NEXT: ler %f8, %f0 +; 
LINUX-NEXT: ler %f0, %f1 +; LINUX-NEXT: brasl %r14, __truncsfhf2@PLT +; LINUX-NEXT: le %f1, 168(%r15) +; LINUX-NEXT: ler %f9, %f0 +; LINUX-NEXT: ler %f0, %f1 +; LINUX-NEXT: brasl %r14, __truncsfhf2@PLT +; LINUX-NEXT: le %f1, 160(%r15) +; LINUX-NEXT: ler %f10, %f0 +; LINUX-NEXT: ler %f0, %f1 +; LINUX-NEXT: brasl %r14, __truncsfhf2@PLT +; LINUX-NEXT: ler %f6, %f0 +; LINUX-NEXT: ler %f0, %f8 +; LINUX-NEXT: ler %f2, %f9 +; LINUX-NEXT: ler %f4, %f10 +; LINUX-NEXT: ld %f8, 192(%r15) # 8-byte Reload +; LINUX-NEXT: ld %f9, 184(%r15) # 8-byte Reload +; LINUX-NEXT: ld %f10, 176(%r15) # 8-byte Reload +; LINUX-NEXT: lmg %r14, %r15, 312(%r15) +; LINUX-NEXT: br %r14 + %result = call { <2 x half>, <2 x half> } @llvm.sincos.v2f16(<2 x half> %a) + ret { <2 x half>, <2 x half> } %result +} diff --git a/llvm/test/CodeGen/SystemZ/llvm.sincos.ll b/llvm/test/CodeGen/SystemZ/llvm.sincos.ll new file mode 100644 index 0000000..9798077 --- /dev/null +++ b/llvm/test/CodeGen/SystemZ/llvm.sincos.ll @@ -0,0 +1,201 @@ +; RUN: llc -mtriple=s390x-linux-gnu < %s | FileCheck -check-prefix=LINUX %s +; RUN: llc -mtriple=s390x-ibm-zos < %s | FileCheck -check-prefix=ZOS %s + +; FIXME: half cases split out since they are broken on zos + +; FIXME: Check ZOS function content + +define { float, float } @test_sincos_f32(float %a) #0 { +; LINUX-LABEL: test_sincos_f32: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: aghi %r15, -168 +; LINUX-NEXT: la %r2, 164(%r15) +; LINUX-NEXT: la %r3, 160(%r15) +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: le %f0, 164(%r15) +; LINUX-NEXT: le %f2, 160(%r15) +; LINUX-NEXT: lmg %r14, %r15, 280(%r15) +; LINUX-NEXT: br %r14 + %result = call { float, float } @llvm.sincos.f32(float %a) + ret { float, float } %result +} + +define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 { +; LINUX-LABEL: test_sincos_v2f32: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: aghi %r15, -184 +; LINUX-NEXT: std %f8, 176(%r15) # 8-byte Spill +; LINUX-NEXT: la %r2, 164(%r15) +; LINUX-NEXT: la %r3, 160(%r15) +; LINUX-NEXT: ler %f8, %f2 +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: la %r2, 172(%r15) +; LINUX-NEXT: la %r3, 168(%r15) +; LINUX-NEXT: ler %f0, %f8 +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: le %f0, 164(%r15) +; LINUX-NEXT: le %f2, 172(%r15) +; LINUX-NEXT: le %f4, 160(%r15) +; LINUX-NEXT: le %f6, 168(%r15) +; LINUX-NEXT: ld %f8, 176(%r15) # 8-byte Reload +; LINUX-NEXT: lmg %r14, %r15, 296(%r15) +; LINUX-NEXT: br %r14 + %result = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> %a) + ret { <2 x float>, <2 x float> } %result +} + +define { <3 x float>, <3 x float> } @test_sincos_v3f32(<3 x float> %a) #0 { +; LINUX-LABEL: test_sincos_v3f32: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r13, %r15, 104(%r15) +; LINUX-NEXT: aghi %r15, -192 +; LINUX-NEXT: std %f8, 184(%r15) # 8-byte Spill +; LINUX-NEXT: std %f9, 176(%r15) # 8-byte Spill +; LINUX-NEXT: lgr %r13, %r2 +; LINUX-NEXT: la %r2, 164(%r15) +; LINUX-NEXT: la %r3, 160(%r15) +; LINUX-NEXT: ler %f8, %f4 +; LINUX-NEXT: ler %f9, %f2 +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: la %r2, 172(%r15) +; LINUX-NEXT: la %r3, 168(%r15) +; LINUX-NEXT: ler %f0, %f9 +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: la %r2, 8(%r13) +; LINUX-NEXT: la %r3, 24(%r13) +; LINUX-NEXT: ler %f0, %f8 +; LINUX-NEXT: brasl %r14, sincosf@PLT +; LINUX-NEXT: le %f0, 164(%r15) +; LINUX-NEXT: le %f1, 172(%r15) +; LINUX-NEXT: le %f2, 160(%r15) +; LINUX-NEXT: lgdr %r0, %f0 +; 
LINUX-NEXT: lgdr %r1, %f1 +; LINUX-NEXT: lgdr %r2, %f2 +; LINUX-NEXT: le %f0, 168(%r15) +; LINUX-NEXT: nilf %r0, 0 +; LINUX-NEXT: srlg %r1, %r1, 32 +; LINUX-NEXT: nilf %r2, 0 +; LINUX-NEXT: lgdr %r3, %f0 +; LINUX-NEXT: srlg %r3, %r3, 32 +; LINUX-NEXT: lr %r0, %r1 +; LINUX-NEXT: lr %r2, %r3 +; LINUX-NEXT: stg %r2, 16(%r13) +; LINUX-NEXT: stg %r0, 0(%r13) +; LINUX-NEXT: ld %f8, 184(%r15) # 8-byte Reload +; LINUX-NEXT: ld %f9, 176(%r15) # 8-byte Reload +; LINUX-NEXT: lmg %r13, %r15, 296(%r15) +; LINUX-NEXT: br %r14 + %result = call { <3 x float>, <3 x float> } @llvm.sincos.v3f32(<3 x float> %a) + ret { <3 x float>, <3 x float> } %result +} + +define { double, double } @test_sincos_f64(double %a) #0 { +; LINUX-LABEL: test_sincos_f64: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: aghi %r15, -176 +; LINUX-NEXT: la %r2, 168(%r15) +; LINUX-NEXT: la %r3, 160(%r15) +; LINUX-NEXT: brasl %r14, sincos@PLT +; LINUX-NEXT: ld %f0, 168(%r15) +; LINUX-NEXT: ld %f2, 160(%r15) +; LINUX-NEXT: lmg %r14, %r15, 288(%r15) +; LINUX-NEXT: br %r14 + %result = call { double, double } @llvm.sincos.f64(double %a) + ret { double, double } %result +} + +define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) #0 { +; LINUX-LABEL: test_sincos_v2f64: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: aghi %r15, -200 +; LINUX-NEXT: std %f8, 192(%r15) # 8-byte Spill +; LINUX-NEXT: la %r2, 168(%r15) +; LINUX-NEXT: la %r3, 160(%r15) +; LINUX-NEXT: ldr %f8, %f2 +; LINUX-NEXT: brasl %r14, sincos@PLT +; LINUX-NEXT: la %r2, 184(%r15) +; LINUX-NEXT: la %r3, 176(%r15) +; LINUX-NEXT: ldr %f0, %f8 +; LINUX-NEXT: brasl %r14, sincos@PLT +; LINUX-NEXT: ld %f0, 168(%r15) +; LINUX-NEXT: ld %f2, 184(%r15) +; LINUX-NEXT: ld %f4, 160(%r15) +; LINUX-NEXT: ld %f6, 176(%r15) +; LINUX-NEXT: ld %f8, 192(%r15) # 8-byte Reload +; LINUX-NEXT: lmg %r14, %r15, 312(%r15) +; LINUX-NEXT: br %r14 + %result = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> %a) + ret { <2 x double>, <2 x double> } %result +} + +define { fp128, fp128 } @test_sincos_f128(fp128 %a) #0 { +; LINUX-LABEL: test_sincos_f128: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r14, %r15, 112(%r15) +; LINUX-NEXT: aghi %r15, -176 +; LINUX-NEXT: ld %f0, 0(%r3) +; LINUX-NEXT: ld %f2, 8(%r3) +; LINUX-NEXT: lgr %r3, %r2 +; LINUX-NEXT: la %r4, 16(%r2) +; LINUX-NEXT: la %r2, 160(%r15) +; LINUX-NEXT: std %f0, 160(%r15) +; LINUX-NEXT: std %f2, 168(%r15) +; LINUX-NEXT: brasl %r14, sincosl@PLT +; LINUX-NEXT: lmg %r14, %r15, 288(%r15) +; LINUX-NEXT: br %r14 + %result = call { fp128, fp128 } @llvm.sincos.f128(fp128 %a) + ret { fp128, fp128 } %result +} + +define { <2 x fp128>, <2 x fp128> } @test_sincos_v2f128(<2 x fp128> %a) #0 { +; LINUX-LABEL: test_sincos_v2f128: +; LINUX: # %bb.0: +; LINUX-NEXT: stmg %r13, %r15, 104(%r15) +; LINUX-NEXT: aghi %r15, -208 +; LINUX-NEXT: std %f8, 200(%r15) # 8-byte Spill +; LINUX-NEXT: std %f10, 192(%r15) # 8-byte Spill +; LINUX-NEXT: lgr %r13, %r2 +; LINUX-NEXT: ld %f8, 0(%r3) +; LINUX-NEXT: ld %f10, 8(%r3) +; LINUX-NEXT: ld %f0, 16(%r3) +; LINUX-NEXT: ld %f2, 24(%r3) +; LINUX-NEXT: la %r3, 16(%r2) +; LINUX-NEXT: la %r4, 48(%r2) +; LINUX-NEXT: la %r2, 176(%r15) +; LINUX-NEXT: std %f0, 176(%r15) +; LINUX-NEXT: std %f2, 184(%r15) +; LINUX-NEXT: brasl %r14, sincosl@PLT +; LINUX-NEXT: la %r4, 32(%r13) +; LINUX-NEXT: la %r2, 160(%r15) +; LINUX-NEXT: std %f8, 160(%r15) +; LINUX-NEXT: std %f10, 168(%r15) +; LINUX-NEXT: lgr %r3, %r13 +; LINUX-NEXT: brasl %r14, sincosl@PLT +; LINUX-NEXT: ld 
%f8, 200(%r15) # 8-byte Reload +; LINUX-NEXT: ld %f10, 192(%r15) # 8-byte Reload +; LINUX-NEXT: lmg %r13, %r15, 312(%r15) +; LINUX-NEXT: br %r14 + %result = call { <2 x fp128>, <2 x fp128> } @llvm.sincos.v2f128(<2 x fp128> %a) + ret { <2 x fp128>, <2 x fp128> } %result +} + + +; ZOS: .quad R(@@FSIN@B) * Offset 0 function descriptor of @@FSIN@B +; ZOS: .quad V(@@FSIN@B) +; ZOS: .quad R(@@FCOS@B) * Offset 16 function descriptor of @@FCOS@B +; ZOS: .quad V(@@FCOS@B) +; ZOS: .quad R(@@SSIN@B) * Offset 32 function descriptor of @@SSIN@B +; ZOS: .quad V(@@SSIN@B) +; ZOS: .quad R(@@SCOS@B) * Offset 48 function descriptor of @@SCOS@B +; ZOS: .quad V(@@SCOS@B) +; ZOS: .quad R(@@LSIN@B) * Offset 64 function descriptor of @@LSIN@B +; ZOS: .quad V(@@LSIN@B) +; ZOS: .quad R(@@LCOS@B) * Offset 80 function descriptor of @@LCOS@B +; ZOS: .quad V(@@LCOS@B) + + +attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/SystemZ/regcoal-subranges-update-remat.mir b/llvm/test/CodeGen/SystemZ/regcoal-subranges-update-remat.mir index e3207df..d9fe810 100644 --- a/llvm/test/CodeGen/SystemZ/regcoal-subranges-update-remat.mir +++ b/llvm/test/CodeGen/SystemZ/regcoal-subranges-update-remat.mir @@ -43,3 +43,28 @@ body: | %3:gr32bit = COPY killed %1 Return implicit %3 ... + +--- +name: test_dead_at_remat_later_defined +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: test_dead_at_remat_later_defined + ; CHECK: undef [[LHI:%[0-9]+]].subreg_l32:gr128bit = LHI 0 + ; CHECK-NEXT: [[LHI:%[0-9]+]].subreg_l64:gr128bit = LGHI 2 + ; CHECK-NEXT: [[LHI1:%[0-9]+]]:gr32bit = LHI 1 + ; CHECK-NEXT: [[LHI:%[0-9]+]].subreg_lh32:gr128bit = COPY [[LHI1]] + ; CHECK-NEXT: [[LGHI:%[0-9]+]]:gr64bit = LGHI 2 + ; CHECK-NEXT: [[LHI:%[0-9]+]].subreg_h32:gr128bit = COPY [[LGHI]].subreg_l32 + ; CHECK-NEXT: $r0q = COPY [[LHI]] + ; CHECK-NEXT: $r4d = COPY [[LGHI]].subreg_h32 + %0:gr64bit = LGHI 2 + %1:gr32bit = LHI 0 + %2:gr32bit = LHI 1 + undef %3.subreg_ll32:gr128bit = COPY %0.subreg_l32 + %3.subreg_lh32:gr128bit = COPY %2 + %3.subreg_l32:gr128bit = COPY %1 + %3.subreg_h32:gr128bit = COPY %0.subreg_l32 + $r0q = COPY %3 + $r4d = COPY %0.subreg_h32 +... 
diff --git a/llvm/test/CodeGen/VE/Scalar/min.ll b/llvm/test/CodeGen/VE/Scalar/min.ll index 69d5ce4..e8f4939 100644 --- a/llvm/test/CodeGen/VE/Scalar/min.ll +++ b/llvm/test/CodeGen/VE/Scalar/min.ll @@ -278,18 +278,18 @@ define i32 @min2u32(i32, i32) { define zeroext i1 @mini1(i1 zeroext, i1 zeroext) { ; CHECK-LABEL: mini1: ; CHECK: # %bb.0: -; CHECK-NEXT: and %s0, %s0, (32)0 -; CHECK-NEXT: and %s2, %s1, %s0 -; CHECK-NEXT: cmov.w.ne %s2, %s1, %s0 -; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1 +; CHECK-NEXT: and %s2, 1, %s0 +; CHECK-NEXT: and %s0, %s1, %s0 +; CHECK-NEXT: cmov.w.ne %s0, %s1, %s2 +; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1 ; CHECK-NEXT: b.l.t (, %s10) ; ; OPT-LABEL: mini1: ; OPT: # %bb.0: -; OPT-NEXT: and %s0, %s0, (32)0 -; OPT-NEXT: and %s2, %s1, %s0 -; OPT-NEXT: cmov.w.ne %s2, %s1, %s0 -; OPT-NEXT: adds.w.zx %s0, %s2, (0)1 +; OPT-NEXT: and %s2, 1, %s0 +; OPT-NEXT: and %s0, %s1, %s0 +; OPT-NEXT: cmov.w.ne %s0, %s1, %s2 +; OPT-NEXT: adds.w.zx %s0, %s0, (0)1 ; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %0, true %4 = and i1 %3, %1 diff --git a/llvm/test/CodeGen/X86/avg-mask.ll b/llvm/test/CodeGen/X86/avg-mask.ll index b148cd3..e886639 100644 --- a/llvm/test/CodeGen/X86/avg-mask.ll +++ b/llvm/test/CodeGen/X86/avg-mask.ll @@ -177,11 +177,11 @@ define <64 x i8> @avg_v64i8_maskz(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwin ; AVX512F-NEXT: shrq $32, %rdi ; AVX512F-NEXT: shrq $48, %rax ; AVX512F-NEXT: shrl $16, %ecx -; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2 -; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3 -; AVX512F-NEXT: vpavgb %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm2 +; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm1 +; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0 ; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0 ; AVX512F-NEXT: kmovw %ecx, %k2 ; AVX512F-NEXT: kmovw %eax, %k3 ; AVX512F-NEXT: kmovw %edi, %k4 @@ -364,11 +364,11 @@ define <32 x i16> @avg_v32i16_maskz(<32 x i16> %a, <32 x i16> %b, i32 %mask) nou ; AVX512F: # %bb.0: ; AVX512F-NEXT: kmovw %edi, %k1 ; AVX512F-NEXT: shrl $16, %edi -; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2 -; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3 -; AVX512F-NEXT: vpavgw %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm2 +; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm1 +; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0 ; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm0 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0 ; AVX512F-NEXT: kmovw %edi, %k2 ; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1 ; AVX512F-NEXT: vpmovdw %zmm1, %ymm1 diff --git a/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll b/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll index ba2cacc..2f86499 100644 --- a/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll +++ b/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll @@ -1974,9 +1974,8 @@ define void @bcast_unfold_fmax_v4f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB60_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %xmm1 -; CHECK-NEXT: vmaxps %xmm0, %xmm1, %xmm1 -; CHECK-NEXT: vmovups %xmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpnltps 4096(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovups %xmm0, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB60_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2007,9 +2006,8 @@ define void @bcast_unfold_fmax_v8f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; 
CHECK-NEXT: .LBB61_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %ymm1 -; CHECK-NEXT: vmaxps %ymm0, %ymm1, %ymm1 -; CHECK-NEXT: vmovups %ymm1, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpnltps 4096(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovups %ymm0, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB61_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2041,9 +2039,8 @@ define void @bcast_unfold_fmax_v16f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB62_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %zmm1 -; CHECK-NEXT: vmaxps %zmm0, %zmm1, %zmm1 -; CHECK-NEXT: vmovups %zmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpnltps 4096(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovups %zmm0, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB62_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2076,9 +2073,8 @@ define void @bcast_unfold_fmax_v2f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB63_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %xmm1 -; CHECK-NEXT: vmaxpd %xmm0, %xmm1, %xmm1 -; CHECK-NEXT: vmovupd %xmm1, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpnltpd 8192(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovupd %xmm0, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB63_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2109,9 +2105,8 @@ define void @bcast_unfold_fmax_v4f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB64_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %ymm1 -; CHECK-NEXT: vmaxpd %ymm0, %ymm1, %ymm1 -; CHECK-NEXT: vmovupd %ymm1, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpnltpd 8192(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovupd %ymm0, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB64_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2143,9 +2138,8 @@ define void @bcast_unfold_fmax_v8f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB65_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %zmm1 -; CHECK-NEXT: vmaxpd %zmm0, %zmm1, %zmm1 -; CHECK-NEXT: vmovupd %zmm1, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpnltpd 8192(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovupd %zmm0, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB65_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2177,9 +2171,8 @@ define void @bcast_unfold_fmin_v4f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB66_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %xmm1 -; CHECK-NEXT: vminps %xmm0, %xmm1, %xmm1 -; CHECK-NEXT: vmovups %xmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpngtps 4096(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovups %xmm0, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB66_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2210,9 +2203,8 @@ define void @bcast_unfold_fmin_v8f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB67_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %ymm1 -; CHECK-NEXT: vminps %ymm0, %ymm1, %ymm1 -; CHECK-NEXT: vmovups %ymm1, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpngtps 4096(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovups %ymm0, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB67_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2244,9 +2236,8 @@ define void @bcast_unfold_fmin_v16f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB68_1: # %bb1 ; 
CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %zmm1 -; CHECK-NEXT: vminps %zmm0, %zmm1, %zmm1 -; CHECK-NEXT: vmovups %zmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpngtps 4096(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovups %zmm0, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB68_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2279,9 +2270,8 @@ define void @bcast_unfold_fmin_v2f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB69_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %xmm1 -; CHECK-NEXT: vminpd %xmm0, %xmm1, %xmm1 -; CHECK-NEXT: vmovupd %xmm1, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpngtpd 8192(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovupd %xmm0, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB69_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2312,9 +2302,8 @@ define void @bcast_unfold_fmin_v4f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB70_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %ymm1 -; CHECK-NEXT: vminpd %ymm0, %ymm1, %ymm1 -; CHECK-NEXT: vmovupd %ymm1, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpngtpd 8192(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovupd %ymm0, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB70_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -2346,9 +2335,8 @@ define void @bcast_unfold_fmin_v8f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB71_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %zmm1 -; CHECK-NEXT: vminpd %zmm0, %zmm1, %zmm1 -; CHECK-NEXT: vmovupd %zmm1, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpngtpd 8192(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovupd %zmm0, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB71_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3161,13 +3149,12 @@ define void @bcast_unfold_pcmpgt_v4i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-4096, %rax # imm = 0xF000 ; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB96_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 4096(%rdi,%rax), %xmm1 -; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %xmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vpcmpltd 4096(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovdqu32 %xmm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB96_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3195,13 +3182,12 @@ define void @bcast_unfold_pcmpgt_v8i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-4096, %rax # imm = 0xF000 ; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm1 = [3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB97_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 4096(%rdi,%rax), %ymm1 -; CHECK-NEXT: vpcmpgtd %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm1 {%k1} = [3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, 4096(%rdi,%rax) +; CHECK-NEXT: vpcmpltd 4096(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovdqu32 %ymm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB97_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3230,13 +3216,12 @@ define void @bcast_unfold_pcmpgt_v16i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-4096, %rax # imm = 0xF000 ; 
CHECK-NEXT: vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm1 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB98_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu64 4096(%rdi,%rax), %zmm1 -; CHECK-NEXT: vpcmpgtd %zmm0, %zmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm1 {%k1} = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu64 %zmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vpcmpltd 4096(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovdqu32 %zmm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB98_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3265,13 +3250,12 @@ define void @bcast_unfold_pcmpgt_v2i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-8192, %rax # imm = 0xE000 ; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm0 = [1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm1 = [3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB99_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 8192(%rdi,%rax), %xmm1 -; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm1 {%k1} = [3,3] -; CHECK-NEXT: vmovdqu %xmm1, 8192(%rdi,%rax) +; CHECK-NEXT: vpcmpltq 8192(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovdqu64 %xmm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB99_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3299,13 +3283,12 @@ define void @bcast_unfold_pcmpgt_v4i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-8192, %rax # imm = 0xE000 ; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm0 = [1,1,1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB100_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 8192(%rdi,%rax), %ymm1 -; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, 8192(%rdi,%rax) +; CHECK-NEXT: vpcmpltq 8192(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovdqu64 %ymm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB100_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3334,13 +3317,12 @@ define void @bcast_unfold_pcmpgt_v8i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-8192, %rax # imm = 0xE000 ; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm1 = [3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB101_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu64 8192(%rdi,%rax), %zmm1 -; CHECK-NEXT: vpcmpgtq %zmm0, %zmm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm1 {%k1} = [3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu64 %zmm1, 8192(%rdi,%rax) +; CHECK-NEXT: vpcmpltq 8192(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovdqu64 %zmm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB101_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3369,13 +3351,12 @@ define void @bcast_unfold_pcmpeq_v4i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-4096, %rax # imm = 0xF000 ; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB102_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 4096(%rdi,%rax), %xmm1 -; CHECK-NEXT: vpcmpeqd %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %xmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vpcmpeqd 4096(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: 
vmovdqu32 %xmm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB102_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3403,13 +3384,12 @@ define void @bcast_unfold_pcmpeq_v8i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-4096, %rax # imm = 0xF000 ; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm1 = [3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB103_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 4096(%rdi,%rax), %ymm1 -; CHECK-NEXT: vpcmpeqd %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm1 {%k1} = [3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, 4096(%rdi,%rax) +; CHECK-NEXT: vpcmpeqd 4096(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovdqu32 %ymm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB103_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3438,13 +3418,12 @@ define void @bcast_unfold_pcmpeq_v16i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-4096, %rax # imm = 0xF000 ; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm1 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB104_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu64 4096(%rdi,%rax), %zmm1 -; CHECK-NEXT: vpcmpeqd %zmm0, %zmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm1 {%k1} = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu64 %zmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vpcmpeqd 4096(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovdqu32 %zmm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB104_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3473,13 +3452,12 @@ define void @bcast_unfold_pcmpeq_v2i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-8192, %rax # imm = 0xE000 ; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm0 = [1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm1 = [3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB105_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 8192(%rdi,%rax), %xmm1 -; CHECK-NEXT: vpcmpeqq %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm1 {%k1} = [3,3] -; CHECK-NEXT: vmovdqu %xmm1, 8192(%rdi,%rax) +; CHECK-NEXT: vpcmpeqq 8192(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovdqu64 %xmm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB105_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3507,13 +3485,12 @@ define void @bcast_unfold_pcmpeq_v4i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-8192, %rax # imm = 0xE000 ; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm0 = [1,1,1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB106_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 8192(%rdi,%rax), %ymm1 -; CHECK-NEXT: vpcmpeqq %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, 8192(%rdi,%rax) +; CHECK-NEXT: vpcmpeqq 8192(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovdqu64 %ymm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB106_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3542,13 +3519,12 @@ define void @bcast_unfold_pcmpeq_v8i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-8192, %rax # imm = 0xE000 ; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm1 = [3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB107_1: # %bb1 ; CHECK-NEXT: # 
=>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu64 8192(%rdi,%rax), %zmm1 -; CHECK-NEXT: vpcmpeqq %zmm0, %zmm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm1 {%k1} = [3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu64 %zmm1, 8192(%rdi,%rax) +; CHECK-NEXT: vpcmpeqq 8192(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovdqu64 %zmm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB107_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -3577,13 +3553,12 @@ define void @bcast_unfold_pcmp_v4i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB108_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu (%rdi,%rax,4), %xmm1 -; CHECK-NEXT: vpcmpltd %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %xmm1, (%rdi,%rax,4) +; CHECK-NEXT: vpcmpgtd (%rdi,%rax,4), %xmm0, %k1 +; CHECK-NEXT: vmovdqu32 %xmm1, (%rdi,%rax,4) {%k1} ; CHECK-NEXT: addq $4, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: jg .LBB108_1 @@ -3612,13 +3587,12 @@ define void @bcast_unfold_pcmp_v8i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm1 = [3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB109_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu (%rdi,%rax,4), %ymm1 -; CHECK-NEXT: vpcmpltd %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm1 {%k1} = [3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, (%rdi,%rax,4) +; CHECK-NEXT: vpcmpgtd (%rdi,%rax,4), %ymm0, %k1 +; CHECK-NEXT: vmovdqu32 %ymm1, (%rdi,%rax,4) {%k1} ; CHECK-NEXT: addq $8, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: jg .LBB109_1 @@ -3648,13 +3622,12 @@ define void @bcast_unfold_pcmp_v16i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm1 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB110_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu64 (%rdi,%rax,4), %zmm1 -; CHECK-NEXT: vpcmpltd %zmm0, %zmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm1 {%k1} = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu64 %zmm1, (%rdi,%rax,4) +; CHECK-NEXT: vpcmpgtd (%rdi,%rax,4), %zmm0, %k1 +; CHECK-NEXT: vmovdqu32 %zmm1, (%rdi,%rax,4) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: jg .LBB110_1 @@ -3684,13 +3657,12 @@ define void @bcast_unfold_pcmp_v2i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm0 = [1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm1 = [3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB111_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu (%rdi,%rax,8), %xmm1 -; CHECK-NEXT: vpcmpltq %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm1 {%k1} = [3,3] -; CHECK-NEXT: vmovdqu %xmm1, (%rdi,%rax,8) +; CHECK-NEXT: vpcmpgtq (%rdi,%rax,8), %xmm0, %k1 +; CHECK-NEXT: vmovdqu64 %xmm1, (%rdi,%rax,8) {%k1} ; CHECK-NEXT: addq $2, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: jg .LBB111_1 @@ -3719,13 +3691,12 @@ define void @bcast_unfold_pcmp_v4i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; 
CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm0 = [1,1,1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB112_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu (%rdi,%rax,8), %ymm1 -; CHECK-NEXT: vpcmpltq %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, (%rdi,%rax,8) +; CHECK-NEXT: vpcmpgtq (%rdi,%rax,8), %ymm0, %k1 +; CHECK-NEXT: vmovdqu64 %ymm1, (%rdi,%rax,8) {%k1} ; CHECK-NEXT: addq $4, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: jg .LBB112_1 @@ -3755,13 +3726,12 @@ define void @bcast_unfold_pcmp_v8i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1] +; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm1 = [3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB113_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu64 (%rdi,%rax,8), %zmm1 -; CHECK-NEXT: vpcmpltq %zmm0, %zmm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm1 {%k1} = [3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu64 %zmm1, (%rdi,%rax,8) +; CHECK-NEXT: vpcmpgtq (%rdi,%rax,8), %zmm0, %k1 +; CHECK-NEXT: vmovdqu64 %zmm1, (%rdi,%rax,8) {%k1} ; CHECK-NEXT: addq $8, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: jg .LBB113_1 @@ -3791,13 +3761,12 @@ define void @bcast_unfold_pcmpu_v4i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2] +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB114_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu (%rdi,%rax,4), %xmm1 -; CHECK-NEXT: vpcmpltud %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %xmm1, (%rdi,%rax,4) +; CHECK-NEXT: vpcmpnleud (%rdi,%rax,4), %xmm0, %k1 +; CHECK-NEXT: vmovdqu32 %xmm1, (%rdi,%rax,4) {%k1} ; CHECK-NEXT: addq $4, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: ja .LBB114_1 @@ -3826,13 +3795,12 @@ define void @bcast_unfold_pcmpu_v8i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2] +; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm1 = [3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB115_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu (%rdi,%rax,4), %ymm1 -; CHECK-NEXT: vpcmpltud %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} ymm1 {%k1} = [3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, (%rdi,%rax,4) +; CHECK-NEXT: vpcmpnleud (%rdi,%rax,4), %ymm0, %k1 +; CHECK-NEXT: vmovdqu32 %ymm1, (%rdi,%rax,4) {%k1} ; CHECK-NEXT: addq $8, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: ja .LBB115_1 @@ -3862,13 +3830,12 @@ define void @bcast_unfold_pcmpu_v16i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2] +; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm1 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB116_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu64 (%rdi,%rax,4), %zmm1 -; CHECK-NEXT: vpcmpltud %zmm0, %zmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} zmm1 {%k1} = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu64 %zmm1, (%rdi,%rax,4) +; CHECK-NEXT: vpcmpnleud (%rdi,%rax,4), %zmm0, %k1 
+; CHECK-NEXT: vmovdqu32 %zmm1, (%rdi,%rax,4) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: ja .LBB116_1 @@ -3898,13 +3865,12 @@ define void @bcast_unfold_pcmpu_v2i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm0 = [2,2] +; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm1 = [3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB117_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu (%rdi,%rax,8), %xmm1 -; CHECK-NEXT: vpcmpltuq %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} xmm1 {%k1} = [3,3] -; CHECK-NEXT: vmovdqu %xmm1, (%rdi,%rax,8) +; CHECK-NEXT: vpcmpnleuq (%rdi,%rax,8), %xmm0, %k1 +; CHECK-NEXT: vmovdqu64 %xmm1, (%rdi,%rax,8) {%k1} ; CHECK-NEXT: addq $2, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: ja .LBB117_1 @@ -3933,13 +3899,12 @@ define void @bcast_unfold_pcmpu_v4i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2] +; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB118_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu (%rdi,%rax,8), %ymm1 -; CHECK-NEXT: vpcmpltuq %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, (%rdi,%rax,8) +; CHECK-NEXT: vpcmpnleuq (%rdi,%rax,8), %ymm0, %k1 +; CHECK-NEXT: vmovdqu64 %ymm1, (%rdi,%rax,8) {%k1} ; CHECK-NEXT: addq $4, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: ja .LBB118_1 @@ -3969,13 +3934,12 @@ define void @bcast_unfold_pcmpu_v8i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm0 = [2,2,2,2,2,2,2,2] +; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm1 = [3,3,3,3,3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB119_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu64 (%rdi,%rax,8), %zmm1 -; CHECK-NEXT: vpcmpltuq %zmm0, %zmm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm1 {%k1} = [3,3,3,3,3,3,3,3] -; CHECK-NEXT: vmovdqu64 %zmm1, (%rdi,%rax,8) +; CHECK-NEXT: vpcmpnleuq (%rdi,%rax,8), %zmm0, %k1 +; CHECK-NEXT: vmovdqu64 %zmm1, (%rdi,%rax,8) {%k1} ; CHECK-NEXT: addq $8, %rax ; CHECK-NEXT: cmpq $1023, %rax # imm = 0x3FF ; CHECK-NEXT: ja .LBB119_1 @@ -4009,10 +3973,8 @@ define void @bcast_unfold_cmp_v4f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB120_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %xmm2 -; CHECK-NEXT: vcmpltps %xmm0, %xmm2, %k1 -; CHECK-NEXT: vblendmps %xmm2, %xmm1, %xmm2 {%k1} -; CHECK-NEXT: vmovups %xmm2, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpngtps 4096(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovups %xmm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB120_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4044,10 +4006,8 @@ define void @bcast_unfold_cmp_v8f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB121_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %ymm2 -; CHECK-NEXT: vcmpltps %ymm0, %ymm2, %k1 -; CHECK-NEXT: vblendmps %ymm2, %ymm1, %ymm2 {%k1} -; CHECK-NEXT: vmovups %ymm2, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpngtps 4096(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovups %ymm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB121_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4080,10 +4040,8 @@ define void 
@bcast_unfold_cmp_v16f32(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB122_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovups 4096(%rdi,%rax), %zmm2 -; CHECK-NEXT: vcmpltps %zmm0, %zmm2, %k1 -; CHECK-NEXT: vblendmps %zmm2, %zmm1, %zmm2 {%k1} -; CHECK-NEXT: vmovups %zmm2, 4096(%rdi,%rax) +; CHECK-NEXT: vcmpngtps 4096(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovups %zmm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB122_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4118,10 +4076,8 @@ define void @bcast_unfold_cmp_v2f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB123_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %xmm2 -; CHECK-NEXT: vcmpltpd %xmm0, %xmm2, %k1 -; CHECK-NEXT: vblendmpd %xmm2, %xmm1, %xmm2 {%k1} -; CHECK-NEXT: vmovupd %xmm2, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpngtpd 8192(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovupd %xmm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB123_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4153,10 +4109,8 @@ define void @bcast_unfold_cmp_v4f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB124_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %ymm2 -; CHECK-NEXT: vcmpltpd %ymm0, %ymm2, %k1 -; CHECK-NEXT: vblendmpd %ymm2, %ymm1, %ymm2 {%k1} -; CHECK-NEXT: vmovupd %ymm2, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpngtpd 8192(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovupd %ymm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB124_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4189,10 +4143,8 @@ define void @bcast_unfold_cmp_v8f64(ptr %arg) { ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB125_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovupd 8192(%rdi,%rax), %zmm2 -; CHECK-NEXT: vcmpltpd %zmm0, %zmm2, %k1 -; CHECK-NEXT: vblendmpd %zmm2, %zmm1, %zmm2 {%k1} -; CHECK-NEXT: vmovupd %zmm2, 8192(%rdi,%rax) +; CHECK-NEXT: vcmpngtpd 8192(%rdi,%rax), %zmm0, %k1 +; CHECK-NEXT: vmovupd %zmm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $64, %rax ; CHECK-NEXT: jne .LBB125_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4254,13 +4206,12 @@ define void @bcast_unfold_ptestm_v4i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-4096, %rax # imm = 0xF000 ; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2] +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB127_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 4096(%rdi,%rax), %xmm1 -; CHECK-NEXT: vptestmd %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %xmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vptestmd 4096(%rdi,%rax), %xmm0, %k1 +; CHECK-NEXT: vmovdqu32 %xmm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB127_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4289,13 +4240,12 @@ define void @bcast_unfold_ptestnm_v4i32(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-4096, %rax # imm = 0xF000 ; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm0 = [2,2,2,2] +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB128_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 4096(%rdi,%rax), %xmm1 -; CHECK-NEXT: vptestnmd %xmm0, %xmm1, %k1 -; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %xmm1, 4096(%rdi,%rax) +; CHECK-NEXT: vptestnmd 4096(%rdi,%rax), %xmm0, %k1 +; 
CHECK-NEXT: vmovdqu32 %xmm1, 4096(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $16, %rax ; CHECK-NEXT: jne .LBB128_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4324,13 +4274,12 @@ define void @bcast_unfold_ptestm_v4i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-8192, %rax # imm = 0xE000 ; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2] +; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB129_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 8192(%rdi,%rax), %ymm1 -; CHECK-NEXT: vptestmq %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, 8192(%rdi,%rax) +; CHECK-NEXT: vptestmq 8192(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovdqu64 %ymm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB129_1 ; CHECK-NEXT: # %bb.2: # %bb10 @@ -4360,13 +4309,12 @@ define void @bcast_unfold_ptestnm_v4i64(ptr %arg) { ; CHECK: # %bb.0: # %bb ; CHECK-NEXT: movq $-8192, %rax # imm = 0xE000 ; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm0 = [2,2,2,2] +; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [3,3,3,3] ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB130_1: # %bb1 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmovdqu 8192(%rdi,%rax), %ymm1 -; CHECK-NEXT: vptestnmq %ymm0, %ymm1, %k1 -; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 {%k1} = [3,3,3,3] -; CHECK-NEXT: vmovdqu %ymm1, 8192(%rdi,%rax) +; CHECK-NEXT: vptestnmq 8192(%rdi,%rax), %ymm0, %k1 +; CHECK-NEXT: vmovdqu64 %ymm1, 8192(%rdi,%rax) {%k1} ; CHECK-NEXT: addq $32, %rax ; CHECK-NEXT: jne .LBB130_1 ; CHECK-NEXT: # %bb.2: # %bb10 diff --git a/llvm/test/CodeGen/X86/avx512-ext.ll b/llvm/test/CodeGen/X86/avx512-ext.ll index c60d9a3..1a712ff 100644 --- a/llvm/test/CodeGen/X86/avx512-ext.ll +++ b/llvm/test/CodeGen/X86/avx512-ext.ll @@ -6,7 +6,8 @@ define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone { ; KNL-LABEL: zext_8x8mem_to_8x16: ; KNL: # %bb.0: -; KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; KNL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; KNL-NEXT: vpsllw $15, %xmm0, %xmm0 ; KNL-NEXT: vpsraw $15, %xmm0, %xmm0 ; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0 @@ -21,7 +22,8 @@ define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone ; ; AVX512DQNOBW-LABEL: zext_8x8mem_to_8x16: ; AVX512DQNOBW: # %bb.0: -; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX512DQNOBW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX512DQNOBW-NEXT: vpsllw $15, %xmm0, %xmm0 ; AVX512DQNOBW-NEXT: vpsraw $15, %xmm0, %xmm0 ; AVX512DQNOBW-NEXT: vpand %xmm1, %xmm0, %xmm0 @@ -35,7 +37,8 @@ define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone define <8 x i16> @sext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone { ; KNL-LABEL: sext_8x8mem_to_8x16: ; KNL: # %bb.0: -; KNL-NEXT: vpmovsxbw (%rdi), %xmm1 +; KNL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; KNL-NEXT: vpmovsxbw %xmm1, %xmm1 ; KNL-NEXT: vpsllw $15, %xmm0, %xmm0 ; KNL-NEXT: vpsraw $15, %xmm0, %xmm0 ; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0 @@ -50,7 
+53,8 @@ define <8 x i16> @sext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone ; ; AVX512DQNOBW-LABEL: sext_8x8mem_to_8x16: ; AVX512DQNOBW: # %bb.0: -; AVX512DQNOBW-NEXT: vpmovsxbw (%rdi), %xmm1 +; AVX512DQNOBW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512DQNOBW-NEXT: vpmovsxbw %xmm1, %xmm1 ; AVX512DQNOBW-NEXT: vpsllw $15, %xmm0, %xmm0 ; AVX512DQNOBW-NEXT: vpsraw $15, %xmm0, %xmm0 ; AVX512DQNOBW-NEXT: vpand %xmm1, %xmm0, %xmm0 @@ -208,8 +212,10 @@ define <32 x i16> @zext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn ; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1 ; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; KNL-NEXT: vmovdqu (%rdi), %ymm2 +; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero +; KNL-NEXT: vextracti128 $1, %ymm2, %xmm2 +; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero ; KNL-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2 ; KNL-NEXT: vpsllw $15, %ymm0, %ymm0 ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0 @@ -231,8 +237,10 @@ define <32 x i16> @zext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn ; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVX512DQNOBW-NEXT: vmovdqu (%rdi), %ymm2 +; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = 
xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero +; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm2, %xmm2 +; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero ; AVX512DQNOBW-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2 ; AVX512DQNOBW-NEXT: vpsllw $15, %ymm0, %ymm0 ; AVX512DQNOBW-NEXT: vpsraw $15, %ymm0, %ymm0 @@ -253,8 +261,10 @@ define <32 x i16> @sext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn ; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1 ; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; KNL-NEXT: vpmovsxbw 16(%rdi), %ymm2 -; KNL-NEXT: vpmovsxbw (%rdi), %ymm3 +; KNL-NEXT: vmovdqu (%rdi), %ymm2 +; KNL-NEXT: vpmovsxbw %xmm2, %ymm3 +; KNL-NEXT: vextracti128 $1, %ymm2, %xmm2 +; KNL-NEXT: vpmovsxbw %xmm2, %ymm2 ; KNL-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2 ; KNL-NEXT: vpsllw $15, %ymm0, %ymm0 ; KNL-NEXT: vpsraw $15, %ymm0, %ymm0 @@ -276,8 +286,10 @@ define <32 x i16> @sext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn ; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512DQNOBW-NEXT: vpmovsxbw 16(%rdi), %ymm2 -; AVX512DQNOBW-NEXT: vpmovsxbw (%rdi), %ymm3 +; AVX512DQNOBW-NEXT: vmovdqu (%rdi), %ymm2 +; AVX512DQNOBW-NEXT: vpmovsxbw %xmm2, %ymm3 +; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm2, %xmm2 +; AVX512DQNOBW-NEXT: vpmovsxbw %xmm2, %ymm2 ; AVX512DQNOBW-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2 ; AVX512DQNOBW-NEXT: vpsllw $15, %ymm0, %ymm0 ; AVX512DQNOBW-NEXT: vpsraw $15, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-shuffle-fma.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-shuffle-fma.ll index f02d1164..6d22f66 100644 --- a/llvm/test/CodeGen/X86/avx512fp16-combine-shuffle-fma.ll +++ b/llvm/test/CodeGen/X86/avx512fp16-combine-shuffle-fma.ll @@ -4,7 +4,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefix=F16C ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s --check-prefix=FP16 -define <2 x half> @foo(<2 x half> %0) "unsafe-fp-math"="true" nounwind { +define <2 x half> @foo(<2 x half> %0) nounwind { ; AVX2-LABEL: foo: ; AVX2: # %bb.0: ; AVX2-NEXT: subq $40, %rsp diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll index 
f3e1417..ed3f0e0 100644 --- a/llvm/test/CodeGen/X86/cmp.ll +++ b/llvm/test/CodeGen/X86/cmp.ll @@ -956,3 +956,185 @@ define i1 @fold_test_and_with_chain(ptr %x, ptr %y, i32 %z) { store i32 %z, ptr %y ret i1 %c } + +define i1 @sext_mask(i32 %a) { +; CHECK-LABEL: sext_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: cmpl $-523, %edi # encoding: [0x81,0xff,0xf5,0xfd,0xff,0xff] +; CHECK-NEXT: # imm = 0xFDF5 +; CHECK-NEXT: setl %al # encoding: [0x0f,0x9c,0xc0] +; CHECK-NEXT: retq # encoding: [0xc3] + %a64 = sext i32 %a to i64 + %v1 = icmp slt i64 %a64, -523 + ret i1 %v1 +} + +define i1 @sext_i9_mask(i9 %a) { +; NO-NDD-LABEL: sext_i9_mask: +; NO-NDD: # %bb.0: +; NO-NDD-NEXT: # kill: def $edi killed $edi def $rdi +; NO-NDD-NEXT: shlq $55, %rdi # encoding: [0x48,0xc1,0xe7,0x37] +; NO-NDD-NEXT: sarq $55, %rdi # encoding: [0x48,0xc1,0xff,0x37] +; NO-NDD-NEXT: cmpl $-522, %edi # encoding: [0x81,0xff,0xf6,0xfd,0xff,0xff] +; NO-NDD-NEXT: # imm = 0xFDF6 +; NO-NDD-NEXT: setl %al # encoding: [0x0f,0x9c,0xc0] +; NO-NDD-NEXT: retq # encoding: [0xc3] +; +; NDD-LABEL: sext_i9_mask: +; NDD: # %bb.0: +; NDD-NEXT: # kill: def $edi killed $edi def $rdi +; NDD-NEXT: shlq $55, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xe7,0x37] +; NDD-NEXT: sarq $55, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xff,0x37] +; NDD-NEXT: cmpl $-522, %edi # encoding: [0x81,0xff,0xf6,0xfd,0xff,0xff] +; NDD-NEXT: # imm = 0xFDF6 +; NDD-NEXT: setl %al # encoding: [0x0f,0x9c,0xc0] +; NDD-NEXT: retq # encoding: [0xc3] + %a64 = sext i9 %a to i64 + %v1 = icmp slt i64 %a64, -522 + ret i1 %v1 +} + +define i1 @sext_i32_mask(i32 %a) { +; CHECK-LABEL: sext_i32_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: cmpl $-522, %edi # encoding: [0x81,0xff,0xf6,0xfd,0xff,0xff] +; CHECK-NEXT: # imm = 0xFDF6 +; CHECK-NEXT: setl %al # encoding: [0x0f,0x9c,0xc0] +; CHECK-NEXT: retq # encoding: [0xc3] + %a64 = sext i32 %a to i64 + %v1 = icmp slt i64 %a64, -522 + ret i1 %v1 +} + +define i1 @i40(i40 %a) { +; NO-NDD-LABEL: i40: +; NO-NDD: # %bb.0: +; NO-NDD-NEXT: shlq $24, %rdi # encoding: [0x48,0xc1,0xe7,0x18] +; NO-NDD-NEXT: sarq $24, %rdi # encoding: [0x48,0xc1,0xff,0x18] +; NO-NDD-NEXT: cmpq $-521, %rdi # encoding: [0x48,0x81,0xff,0xf7,0xfd,0xff,0xff] +; NO-NDD-NEXT: # imm = 0xFDF7 +; NO-NDD-NEXT: setl %al # encoding: [0x0f,0x9c,0xc0] +; NO-NDD-NEXT: retq # encoding: [0xc3] +; +; NDD-LABEL: i40: +; NDD: # %bb.0: +; NDD-NEXT: shlq $24, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xe7,0x18] +; NDD-NEXT: sarq $24, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xff,0x18] +; NDD-NEXT: cmpq $-521, %rdi # encoding: [0x48,0x81,0xff,0xf7,0xfd,0xff,0xff] +; NDD-NEXT: # imm = 0xFDF7 +; NDD-NEXT: setl %al # encoding: [0x0f,0x9c,0xc0] +; NDD-NEXT: retq # encoding: [0xc3] + %a64 = sext i40 %a to i64 + %v1 = icmp slt i64 %a64, -521 + ret i1 %v1 +} + +define i1 @sext_i9_mask_sgt(i9 %a) { +; NO-NDD-LABEL: sext_i9_mask_sgt: +; NO-NDD: # %bb.0: +; NO-NDD-NEXT: # kill: def $edi killed $edi def $rdi +; NO-NDD-NEXT: shlq $55, %rdi # encoding: [0x48,0xc1,0xe7,0x37] +; NO-NDD-NEXT: sarq $55, %rdi # encoding: [0x48,0xc1,0xff,0x37] +; NO-NDD-NEXT: cmpl $-520, %edi # encoding: [0x81,0xff,0xf8,0xfd,0xff,0xff] +; NO-NDD-NEXT: # imm = 0xFDF8 +; NO-NDD-NEXT: setge %al # encoding: [0x0f,0x9d,0xc0] +; NO-NDD-NEXT: retq # encoding: [0xc3] +; +; NDD-LABEL: sext_i9_mask_sgt: +; NDD: # %bb.0: +; NDD-NEXT: # kill: def $edi killed $edi def $rdi +; NDD-NEXT: shlq $55, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xe7,0x37] +; NDD-NEXT: sarq $55, %rdi # EVEX TO 
LEGACY Compression encoding: [0x48,0xc1,0xff,0x37] +; NDD-NEXT: cmpl $-520, %edi # encoding: [0x81,0xff,0xf8,0xfd,0xff,0xff] +; NDD-NEXT: # imm = 0xFDF8 +; NDD-NEXT: setge %al # encoding: [0x0f,0x9d,0xc0] +; NDD-NEXT: retq # encoding: [0xc3] + %a64 = sext i9 %a to i64 + %v1 = icmp sgt i64 %a64, -521 + ret i1 %v1 +} + +define i1 @sext_i32_mask_sgt(i32 %a) { +; CHECK-LABEL: sext_i32_mask_sgt: +; CHECK: # %bb.0: +; CHECK-NEXT: cmpl $-521, %edi # encoding: [0x81,0xff,0xf7,0xfd,0xff,0xff] +; CHECK-NEXT: # imm = 0xFDF7 +; CHECK-NEXT: setge %al # encoding: [0x0f,0x9d,0xc0] +; CHECK-NEXT: retq # encoding: [0xc3] + %a64 = sext i32 %a to i64 + %v1 = icmp sgt i64 %a64, -522 + ret i1 %v1 +} + +define i1 @i40_sge(i40 %a) { +; NO-NDD-LABEL: i40_sge: +; NO-NDD: # %bb.0: +; NO-NDD-NEXT: shlq $24, %rdi # encoding: [0x48,0xc1,0xe7,0x18] +; NO-NDD-NEXT: sarq $24, %rdi # encoding: [0x48,0xc1,0xff,0x18] +; NO-NDD-NEXT: cmpq $-521, %rdi # encoding: [0x48,0x81,0xff,0xf7,0xfd,0xff,0xff] +; NO-NDD-NEXT: # imm = 0xFDF7 +; NO-NDD-NEXT: setge %al # encoding: [0x0f,0x9d,0xc0] +; NO-NDD-NEXT: retq # encoding: [0xc3] +; +; NDD-LABEL: i40_sge: +; NDD: # %bb.0: +; NDD-NEXT: shlq $24, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xe7,0x18] +; NDD-NEXT: sarq $24, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xff,0x18] +; NDD-NEXT: cmpq $-521, %rdi # encoding: [0x48,0x81,0xff,0xf7,0xfd,0xff,0xff] +; NDD-NEXT: # imm = 0xFDF7 +; NDD-NEXT: setge %al # encoding: [0x0f,0x9d,0xc0] +; NDD-NEXT: retq # encoding: [0xc3] + %a64 = sext i40 %a to i64 + %v1 = icmp sge i64 %a64, -521 + ret i1 %v1 +} + +define i1 @i40_eq(i40 %a) { +; NO-NDD-LABEL: i40_eq: +; NO-NDD: # %bb.0: +; NO-NDD-NEXT: movabsq $1099511627775, %rax # encoding: [0x48,0xb8,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00] +; NO-NDD-NEXT: # imm = 0xFFFFFFFFFF +; NO-NDD-NEXT: andq %rdi, %rax # encoding: [0x48,0x21,0xf8] +; NO-NDD-NEXT: movabsq $1099511627255, %rcx # encoding: [0x48,0xb9,0xf7,0xfd,0xff,0xff,0xff,0x00,0x00,0x00] +; NO-NDD-NEXT: # imm = 0xFFFFFFFDF7 +; NO-NDD-NEXT: cmpq %rcx, %rax # encoding: [0x48,0x39,0xc8] +; NO-NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0] +; NO-NDD-NEXT: retq # encoding: [0xc3] +; +; NDD-LABEL: i40_eq: +; NDD: # %bb.0: +; NDD-NEXT: movabsq $1099511627775, %rax # encoding: [0x48,0xb8,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00] +; NDD-NEXT: # imm = 0xFFFFFFFFFF +; NDD-NEXT: andq %rdi, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x21,0xf8] +; NDD-NEXT: movabsq $1099511627255, %rcx # encoding: [0x48,0xb9,0xf7,0xfd,0xff,0xff,0xff,0x00,0x00,0x00] +; NDD-NEXT: # imm = 0xFFFFFFFDF7 +; NDD-NEXT: cmpq %rcx, %rax # encoding: [0x48,0x39,0xc8] +; NDD-NEXT: sete %al # encoding: [0x0f,0x94,0xc0] +; NDD-NEXT: retq # encoding: [0xc3] + %a64 = sext i40 %a to i64 + %v1 = icmp eq i64 %a64, -521 + ret i1 %v1 +} + +define i1 @i40_ult(i40 %a) { +; NO-NDD-LABEL: i40_ult: +; NO-NDD: # %bb.0: +; NO-NDD-NEXT: shlq $24, %rdi # encoding: [0x48,0xc1,0xe7,0x18] +; NO-NDD-NEXT: sarq $24, %rdi # encoding: [0x48,0xc1,0xff,0x18] +; NO-NDD-NEXT: cmpq $-521, %rdi # encoding: [0x48,0x81,0xff,0xf7,0xfd,0xff,0xff] +; NO-NDD-NEXT: # imm = 0xFDF7 +; NO-NDD-NEXT: setb %al # encoding: [0x0f,0x92,0xc0] +; NO-NDD-NEXT: retq # encoding: [0xc3] +; +; NDD-LABEL: i40_ult: +; NDD: # %bb.0: +; NDD-NEXT: shlq $24, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xe7,0x18] +; NDD-NEXT: sarq $24, %rdi # EVEX TO LEGACY Compression encoding: [0x48,0xc1,0xff,0x18] +; NDD-NEXT: cmpq $-521, %rdi # encoding: [0x48,0x81,0xff,0xf7,0xfd,0xff,0xff] +; NDD-NEXT: # imm = 0xFDF7 +; 
NDD-NEXT: setb %al # encoding: [0x0f,0x92,0xc0] +; NDD-NEXT: retq # encoding: [0xc3] + %a64 = sext i40 %a to i64 + %v1 = icmp ult i64 %a64, -521 + ret i1 %v1 +} diff --git a/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll b/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll deleted file mode 100644 index ea7454f..0000000 --- a/llvm/test/CodeGen/X86/coalescer-breaks-subreg-to-reg-liveness.ll +++ /dev/null @@ -1,185 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -mtriple=x86_64-grtev4-linux-gnu < %s | FileCheck %s - -%struct.wibble = type { %struct.wombat } -%struct.wombat = type { %struct.ham, [3 x i8] } -%struct.ham = type { %struct.zot } -%struct.zot = type { %struct.blam } -%struct.blam = type { %struct.ham.0 } -%struct.ham.0 = type { %struct.bar } -%struct.bar = type { %struct.bar.1 } -%struct.bar.1 = type { %struct.baz, i8 } -%struct.baz = type { %struct.snork } -%struct.snork = type <{ %struct.spam, i8, [3 x i8] }> -%struct.spam = type { %struct.snork.2, %struct.snork.2 } -%struct.snork.2 = type { i32 } -%struct.snork.3 = type { %struct.baz, i8, [3 x i8] } - -define void @foo(ptr %arg, ptr %arg1, i40 %arg2, ptr %arg3, i32 %arg4) #0 { -; CHECK-LABEL: foo: -; CHECK: # %bb.0: # %bb -; CHECK-NEXT: pushq %rbp -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset %rbp, -16 -; CHECK-NEXT: movq %rsp, %rbp -; CHECK-NEXT: .cfi_def_cfa_register %rbp -; CHECK-NEXT: pushq %r15 -; CHECK-NEXT: pushq %r14 -; CHECK-NEXT: pushq %r13 -; CHECK-NEXT: pushq %r12 -; CHECK-NEXT: pushq %rbx -; CHECK-NEXT: subq $24, %rsp -; CHECK-NEXT: .cfi_offset %rbx, -56 -; CHECK-NEXT: .cfi_offset %r12, -48 -; CHECK-NEXT: .cfi_offset %r13, -40 -; CHECK-NEXT: .cfi_offset %r14, -32 -; CHECK-NEXT: .cfi_offset %r15, -24 -; CHECK-NEXT: movl %r8d, %r14d -; CHECK-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; CHECK-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; CHECK-NEXT: movq %rsi, %r13 -; CHECK-NEXT: movq %rdi, %r15 -; CHECK-NEXT: incl %r14d -; CHECK-NEXT: xorl %ebx, %ebx -; CHECK-NEXT: # implicit-def: $r12 -; CHECK-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; CHECK-NEXT: jmp .LBB0_3 -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: .LBB0_1: # %bb17 -; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: movq %r15, %r13 -; CHECK-NEXT: xorl %r15d, %r15d -; CHECK-NEXT: testq %rbx, %rbx -; CHECK-NEXT: sete %r15b -; CHECK-NEXT: xorl %edi, %edi -; CHECK-NEXT: callq _Znwm@PLT -; CHECK-NEXT: shll $4, %r15d -; CHECK-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload -; CHECK-NEXT: movq %r12, %rcx -; CHECK-NEXT: shrq $32, %rcx -; CHECK-NEXT: movb %cl, 12(%rax) -; CHECK-NEXT: movl %r12d, 8(%rax) -; CHECK-NEXT: movq %r15, %rbx -; CHECK-NEXT: movq %r13, %r15 -; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload -; CHECK-NEXT: decl %r14d -; CHECK-NEXT: je .LBB0_8 -; CHECK-NEXT: .LBB0_3: # %bb7 -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: callq widget@PLT -; CHECK-NEXT: cmpb $-5, (%r13) -; CHECK-NEXT: jae .LBB0_5 -; CHECK-NEXT: # %bb.4: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: movl %r12d, %r12d -; CHECK-NEXT: cmpq %r15, %rbx -; CHECK-NEXT: jbe .LBB0_1 -; CHECK-NEXT: jmp .LBB0_7 -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: .LBB0_5: # %bb12 -; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: movq 0, %rax -; CHECK-NEXT: movq 8, %rax -; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload -; CHECK-NEXT: cmpq %r15, %rbx -; 
CHECK-NEXT: jbe .LBB0_1 -; CHECK-NEXT: .LBB0_7: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: xorl %ebx, %ebx -; CHECK-NEXT: decl %r14d -; CHECK-NEXT: jne .LBB0_3 -; CHECK-NEXT: .LBB0_8: # %bb21 -; CHECK-NEXT: cmpb $0, 12(%rax) -; CHECK-NEXT: jne .LBB0_10 -; CHECK-NEXT: # %bb.9: # %bb26 -; CHECK-NEXT: addq $24, %rsp -; CHECK-NEXT: popq %rbx -; CHECK-NEXT: popq %r12 -; CHECK-NEXT: popq %r13 -; CHECK-NEXT: popq %r14 -; CHECK-NEXT: popq %r15 -; CHECK-NEXT: popq %rbp -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 -; CHECK-NEXT: retq -; CHECK-NEXT: .LBB0_10: # %bb25 -; CHECK-NEXT: .cfi_def_cfa %rbp, 16 -; CHECK-NEXT: movq %r15, %rdi -; CHECK-NEXT: callq pluto@PLT -bb: - br label %bb7 - -bb5: ; preds = %bb17, %bb14 - %phi = phi ptr [ %call19, %bb17 ], [ null, %bb14 ] - %phi6 = phi ptr [ %getelementptr, %bb17 ], [ null, %bb14 ] - %add = add i32 %phi9, 1 - %icmp = icmp eq i32 %phi9, %arg4 - br i1 %icmp, label %bb21, label %bb7 - -bb7: ; preds = %bb5, %bb - %phi8 = phi ptr [ null, %bb ], [ %phi6, %bb5 ] - %phi9 = phi i32 [ 0, %bb ], [ %add, %bb5 ] - %phi10 = phi i40 [ poison, %bb ], [ %phi15, %bb5 ] - %call = call ptr @widget() - %load = load i8, ptr %arg1, align 8 - %icmp11 = icmp ult i8 %load, -5 - %and = and i40 %phi10, 4294967295 - br i1 %icmp11, label %bb14, label %bb12 - -bb12: ; preds = %bb7 - %load13 = load volatile { i64, i64 }, ptr null, align 4294967296 - br label %bb14 - -bb14: ; preds = %bb12, %bb7 - %phi15 = phi i40 [ %and, %bb7 ], [ %arg2, %bb12 ] - %icmp16 = icmp ugt ptr %phi8, %arg - br i1 %icmp16, label %bb5, label %bb17 - -bb17: ; preds = %bb14 - %icmp18 = icmp eq ptr %phi8, null - %zext = zext i1 %icmp18 to i64 - %call19 = call ptr @_Znwm(i64 0) - %getelementptr = getelementptr %struct.wibble, ptr %arg3, i64 %zext - %getelementptr20 = getelementptr i8, ptr %call19, i64 8 - store i40 %phi15, ptr %getelementptr20, align 4 - br label %bb5 - -bb21: ; preds = %bb5 - %getelementptr22 = getelementptr %struct.snork.3, ptr %phi, i64 0, i32 1 - %load23 = load i8, ptr %getelementptr22, align 4 - %icmp24 = icmp eq i8 %load23, 0 - br i1 %icmp24, label %bb26, label %bb25 - -bb25: ; preds = %bb21 - call void @pluto(ptr %arg) - unreachable - -bb26: ; preds = %bb21 - ret void -} - -define void @eggs(ptr %arg, ptr %arg1) { -; CHECK-LABEL: eggs: -; CHECK: # %bb.0: # %bb -; CHECK-NEXT: pushq %rax -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: movq %rdi, %rax -; CHECK-NEXT: movq %rsi, %rdi -; CHECK-NEXT: movq %rax, %rsi -; CHECK-NEXT: xorl %edx, %edx -; CHECK-NEXT: xorl %ecx, %ecx -; CHECK-NEXT: xorl %r8d, %r8d -; CHECK-NEXT: callq foo@PLT -; CHECK-NEXT: popq %rax -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: retq -bb: - call void @foo(ptr %arg1, ptr %arg, i40 0, ptr null, i32 0) - ret void -} - -declare ptr @widget() - -declare void @pluto(ptr) - -declare ptr @_Znwm(i64) - -attributes #0 = { noinline "frame-pointer"="all" } diff --git a/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir b/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir index 0bc208d..8241a17 100644 --- a/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir +++ b/llvm/test/CodeGen/X86/coalescer-implicit-def-regression-imp-operand-assert.mir @@ -1,5 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3 -# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=register-coalescer -o - %s | FileCheck %s --match-full-lines +# RUN: llc -mtriple=x86_64-unknown-linux-gnu 
-run-pass=register-coalescer -o - %s | FileCheck %s --- name: rematerialize_subreg_to_reg_added_impdef_1 tracksRegLiveness: true @@ -9,7 +9,7 @@ body: | ; CHECK-NEXT: successors: %bb.1(0x2aaaaaab), %bb.2(0x55555555) ; CHECK-NEXT: liveins: $edi ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]] + ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags ; CHECK-NEXT: JCC_1 %bb.2, 5, implicit killed undef $eflags ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: @@ -28,7 +28,7 @@ body: | ; CHECK-NEXT: JCC_1 %bb.5, 5, implicit killed undef $eflags ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.4: - ; CHECK-NEXT: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al, implicit-def $al + ; CHECK-NEXT: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def $al ; CHECK-NEXT: RET 0, killed undef $al ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.5: diff --git a/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir b/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir deleted file mode 100644 index 2e6395f..0000000 --- a/llvm/test/CodeGen/X86/coalescing-subreg-to-reg-requires-subrange-update.mir +++ /dev/null @@ -1,44 +0,0 @@ -# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3 -# RUN: llc -mtriple=x86_64-- -run-pass=register-coalescer -enable-subreg-liveness -verify-coalescing -o - %s | FileCheck %s - ---- -name: requires_new_subrange_coalesce_subreg_to_reg -tracksRegLiveness: true -body: | - ; CHECK-LABEL: name: requires_new_subrange_coalesce_subreg_to_reg - ; CHECK: bb.0: - ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) - ; CHECK-NEXT: liveins: $eax - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %a.sub_32bit:gr64_with_sub_8bit = COPY $eax - ; CHECK-NEXT: %b:gr32 = IMPLICIT_DEF - ; CHECK-NEXT: %c:gr64 = INSERT_SUBREG %a, %b, %subreg.sub_32bit - ; CHECK-NEXT: JCC_1 %bb.2, 4, implicit undef $eflags - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.1: - ; CHECK-NEXT: successors: %bb.2(0x80000000) - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %a.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags - ; CHECK-NEXT: %c.sub_32bit:gr64 = COPY %a - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: %c.sub_32bit:gr64 = SUBREG_TO_REG %a, %b, %subreg.sub_32bit - ; CHECK-NEXT: RET 0, implicit %c - bb.0: - liveins: $eax - %init_eax:gr32 = COPY $eax - %a:gr64 = SUBREG_TO_REG 0, %init_eax, %subreg.sub_32bit - %b:gr32 = IMPLICIT_DEF - %c:gr64 = INSERT_SUBREG %a, %b, %subreg.sub_32bit - JCC_1 %bb.2, 4, implicit undef $eflags - - bb.1: - %imm0:gr32 = MOV32r0 implicit-def dead $eflags - %a = SUBREG_TO_REG 0, %imm0, %subreg.sub_32bit - %c.sub_32bit = COPY %a - - bb.2: - %c.sub_32bit = SUBREG_TO_REG %a, %b, %subreg.sub_32bit - RET 0, implicit %c - -... 
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll index 1ae1d61..98187d6 100644 --- a/llvm/test/CodeGen/X86/combine-sdiv.ll +++ b/llvm/test/CodeGen/X86/combine-sdiv.ll @@ -2201,9 +2201,9 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] ; SSE41-NEXT: psraw $8, %xmm0 ; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: psllw $7, %xmm3 -; SSE41-NEXT: paddw %xmm0, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6],xmm0[7] +; SSE41-NEXT: paddw %xmm0, %xmm3 +; SSE41-NEXT: psllw $7, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5],xmm0[6],xmm3[7] ; SSE41-NEXT: psrlw $8, %xmm0 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE41-NEXT: psraw $8, %xmm2 @@ -2234,9 +2234,9 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpsllw $7, %xmm2, %xmm3 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6],xmm2[7] +; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 +; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5],xmm2[6],xmm3[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/combine-storetomstore.ll b/llvm/test/CodeGen/X86/combine-storetomstore.ll new file mode 100644 index 0000000..c18c89d --- /dev/null +++ b/llvm/test/CodeGen/X86/combine-storetomstore.ll @@ -0,0 +1,1540 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s -check-prefix=AVX +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s -check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s -check-prefix=AVX512 + +define void @test_masked_store_success_v4i8(<4 x i8> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v4i8: +; AVX: # %bb.0: +; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX-NEXT: vmovdqa (%rdi), %xmm2 +; AVX-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX-NEXT: vmovd %xmm0, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v4i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa (%rdi), %xmm2 +; AVX2-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vmovd %xmm0, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v4i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovdqa (%rdi), %xmm1 +; AVX512-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1} +; AVX512-NEXT: vmovd %xmm1, (%rdi) +; AVX512-NEXT: retq + %load = load <4 x i8>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i8> %x, <4 x i8> %load + store <4 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i16(<4 x i16> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: 
test_masked_store_success_v4i16: +; AVX: # %bb.0: +; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,u,4,u,8,u,12,u,8,u,12,u,12,u,14,u] +; AVX-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX-NEXT: vmovq %xmm0, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v4i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,u,4,u,8,u,12,u,8,u,12,u,12,u,14,u] +; AVX2-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX2-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX2-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vmovq %xmm0, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v4i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1} +; AVX512-NEXT: vmovq %xmm1, (%rdi) +; AVX512-NEXT: retq + %load = load <4 x i16>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i16> %x, <4 x i16> %load + store <4 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i32(<4 x i32> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v4i32: +; AVX: # %bb.0: +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vmaskmovps %xmm0, %xmm1, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v4i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmaskmovd %xmm0, %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovdqa32 %xmm0, (%rdi) {%k1} +; AVX512-NEXT: retq + %load = load <4 x i32>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %load + store <4 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4i64(<4 x i64> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v4i64: +; AVX: # %bb.0: +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vpmovsxdq %xmm1, %xmm2 +; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmaskmovpd %ymm0, %ymm1, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v4i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1 +; AVX2-NEXT: vpmaskmovq %ymm0, %ymm1, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v4i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovdqa64 %ymm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <4 x i64>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %load + store <4 x i64> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f16(<4 x half> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v4f16: +; AVX: # %bb.0: +; AVX-NEXT: vpsrlq $48, %xmm0, %xmm2 +; AVX-NEXT: vpextrw $0, %xmm2, %edx +; AVX-NEXT: vpsrld $16, %xmm0, %xmm2 +; AVX-NEXT: vpextrw $0, %xmm2, %ecx +; AVX-NEXT: movzwl 2(%rdi), %eax +; AVX-NEXT: vpextrb $4, %xmm1, %esi +; AVX-NEXT: testb $1, %sil +; AVX-NEXT: cmovnel %ecx, %eax +; AVX-NEXT: 
vpextrb $8, %xmm1, %ecx +; AVX-NEXT: testb $1, %cl +; AVX-NEXT: jne .LBB4_1 +; AVX-NEXT: # %bb.2: +; AVX-NEXT: movl 4(%rdi), %ecx +; AVX-NEXT: jmp .LBB4_3 +; AVX-NEXT: .LBB4_1: +; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3] +; AVX-NEXT: vpextrw $0, %xmm2, %ecx +; AVX-NEXT: .LBB4_3: +; AVX-NEXT: movzwl 6(%rdi), %esi +; AVX-NEXT: vpextrb $12, %xmm1, %r8d +; AVX-NEXT: testb $1, %r8b +; AVX-NEXT: cmovnel %edx, %esi +; AVX-NEXT: vmovd %xmm1, %edx +; AVX-NEXT: testb $1, %dl +; AVX-NEXT: jne .LBB4_4 +; AVX-NEXT: # %bb.5: +; AVX-NEXT: movl (%rdi), %edx +; AVX-NEXT: jmp .LBB4_6 +; AVX-NEXT: .LBB4_4: +; AVX-NEXT: vpextrw $0, %xmm0, %edx +; AVX-NEXT: .LBB4_6: +; AVX-NEXT: movw %dx, (%rdi) +; AVX-NEXT: movw %si, 6(%rdi) +; AVX-NEXT: movw %cx, 4(%rdi) +; AVX-NEXT: movw %ax, 2(%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v4f16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm2 +; AVX2-NEXT: vpextrw $0, %xmm2, %edx +; AVX2-NEXT: vpsrld $16, %xmm0, %xmm2 +; AVX2-NEXT: vpextrw $0, %xmm2, %ecx +; AVX2-NEXT: movzwl 2(%rdi), %eax +; AVX2-NEXT: vpextrb $4, %xmm1, %esi +; AVX2-NEXT: testb $1, %sil +; AVX2-NEXT: cmovnel %ecx, %eax +; AVX2-NEXT: vpextrb $8, %xmm1, %ecx +; AVX2-NEXT: testb $1, %cl +; AVX2-NEXT: jne .LBB4_1 +; AVX2-NEXT: # %bb.2: +; AVX2-NEXT: movl 4(%rdi), %ecx +; AVX2-NEXT: jmp .LBB4_3 +; AVX2-NEXT: .LBB4_1: +; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3] +; AVX2-NEXT: vpextrw $0, %xmm2, %ecx +; AVX2-NEXT: .LBB4_3: +; AVX2-NEXT: movzwl 6(%rdi), %esi +; AVX2-NEXT: vpextrb $12, %xmm1, %r8d +; AVX2-NEXT: testb $1, %r8b +; AVX2-NEXT: cmovnel %edx, %esi +; AVX2-NEXT: vmovd %xmm1, %edx +; AVX2-NEXT: testb $1, %dl +; AVX2-NEXT: jne .LBB4_4 +; AVX2-NEXT: # %bb.5: +; AVX2-NEXT: movl (%rdi), %edx +; AVX2-NEXT: jmp .LBB4_6 +; AVX2-NEXT: .LBB4_4: +; AVX2-NEXT: vpextrw $0, %xmm0, %edx +; AVX2-NEXT: .LBB4_6: +; AVX2-NEXT: movw %dx, (%rdi) +; AVX2-NEXT: movw %si, 6(%rdi) +; AVX2-NEXT: movw %cx, 4(%rdi) +; AVX2-NEXT: movw %ax, 2(%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v4f16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovdqa (%rdi), %xmm1 +; AVX512-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1} +; AVX512-NEXT: vmovq %xmm1, (%rdi) +; AVX512-NEXT: retq + %load = load <4 x half>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x half> %x, <4 x half> %load + store <4 x half> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f32(<4 x float> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v4f32: +; AVX: # %bb.0: +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vmaskmovps %xmm0, %xmm1, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v4f32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vmaskmovps %xmm0, %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v4f32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovaps %xmm0, (%rdi) {%k1} +; AVX512-NEXT: retq + %load = load <4 x float>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x float> %x, <4 x float> %load + store <4 x float> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v4f64(<4 x double> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v4f64: +; AVX: # %bb.0: +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vpmovsxdq %xmm1, %xmm2 +; AVX-NEXT: vpshufd {{.*#+}} xmm1 = 
xmm1[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmaskmovpd %ymm0, %ymm1, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v4f64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1 +; AVX2-NEXT: vmaskmovpd %ymm0, %ymm1, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v4f64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovapd %ymm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <4 x double>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x double> %x, <4 x double> %load + store <4 x double> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i8(<8 x i8> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v8i8: +; AVX: # %bb.0: +; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; AVX-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX-NEXT: vmovq %xmm0, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v8i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX2-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vmovq %xmm0, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v8i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1} +; AVX512-NEXT: vmovq %xmm1, (%rdi) +; AVX512-NEXT: retq + %load = load <8 x i8>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i8> %x, <8 x i8> %load + store <8 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i16(<8 x i16> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v8i16: +; AVX: # %bb.0: +; AVX-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX-NEXT: vmovdqa (%rdi), %xmm2 +; AVX-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v8i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX2-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa (%rdi), %xmm2 +; AVX2-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v8i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu16 %xmm0, (%rdi) {%k1} +; AVX512-NEXT: retq + %load = load <8 x i16>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %load + store <8 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i32(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v8i32: +; AVX: # %bb.0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmaskmovps %ymm0, 
%ymm1, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v8i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vpmaskmovd %ymm0, %ymm1, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovdqa32 %ymm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8i64(<8 x i64> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v8i64: +; AVX: # %bb.0: +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX-NEXT: vpmovsxdq %xmm3, %xmm4 +; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm3, %xmm3 +; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpmovsxdq %xmm2, %xmm4 +; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm2, %xmm2 +; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX-NEXT: vmaskmovpd %ymm0, %ymm2, (%rdi) +; AVX-NEXT: vmaskmovpd %ymm1, %ymm3, 32(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v8i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] +; AVX2-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3 +; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX2-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2 +; AVX2-NEXT: vpmaskmovq %ymm0, %ymm2, (%rdi) +; AVX2-NEXT: vpmaskmovq %ymm1, %ymm3, 32(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v8i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu64 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x i64>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %load + store <8 x i64> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f16(<8 x half> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v8f16: +; AVX: # %bb.0: +; AVX-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX-NEXT: vmovdqa (%rdi), %xmm2 +; AVX-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v8f16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX2-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa (%rdi), %xmm2 +; AVX2-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v8f16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu16 %xmm0, (%rdi) {%k1} +; AVX512-NEXT: retq + %load = load <8 x half>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x half> 
%x, <8 x half> %load + store <8 x half> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f32(<8 x float> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v8f32: +; AVX: # %bb.0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmaskmovps %ymm0, %ymm1, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v8f32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vmaskmovps %ymm0, %ymm1, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v8f32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovaps %ymm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x float>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x float> %x, <8 x float> %load + store <8 x float> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v8f64(<8 x double> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v8f64: +; AVX: # %bb.0: +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX-NEXT: vpmovsxdq %xmm3, %xmm4 +; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm3, %xmm3 +; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpmovsxdq %xmm2, %xmm4 +; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm2, %xmm2 +; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX-NEXT: vmaskmovpd %ymm0, %ymm2, (%rdi) +; AVX-NEXT: vmaskmovpd %ymm1, %ymm3, 32(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v8f64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] +; AVX2-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3 +; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX2-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2 +; AVX2-NEXT: vmaskmovpd %ymm0, %ymm2, (%rdi) +; AVX2-NEXT: vmaskmovpd %ymm1, %ymm3, 32(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v8f64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovupd %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x double>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x double> %x, <8 x double> %load + store <8 x double> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i8(<16 x i8> %x, ptr %ptr, <16 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v16i8: +; AVX: # %bb.0: +; AVX-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX-NEXT: vmovdqa (%rdi), %xmm2 +; AVX-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v16i8: +; AVX2: # %bb.0: +; 
AVX2-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa (%rdi), %xmm2 +; AVX2-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v16i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512-NEXT: vpmovb2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu8 %xmm0, (%rdi) {%k1} +; AVX512-NEXT: retq + %load = load <16 x i8>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %load + store <16 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i16(<16 x i16> %x, ptr %ptr, <16 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v16i16: +; AVX: # %bb.0: +; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX-NEXT: vpsllw $15, %xmm2, %xmm2 +; AVX-NEXT: vpsraw $15, %xmm2, %xmm2 +; AVX-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX-NEXT: vpsraw $15, %xmm1, %xmm1 +; AVX-NEXT: vmovdqa (%rdi), %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm0, %xmm3, %xmm1 +; AVX-NEXT: vmovdqa 16(%rdi), %xmm3 +; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm3, %xmm0 +; AVX-NEXT: vmovdqa %xmm1, (%rdi) +; AVX-NEXT: vmovdqa %xmm0, 16(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v16i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX2-NEXT: vpsllw $15, %ymm1, %ymm1 +; AVX2-NEXT: vpsraw $15, %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa (%rdi), %ymm2 +; AVX2-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vmovdqa %ymm0, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v16i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512-NEXT: vpmovb2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu16 %ymm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <16 x i16>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %load + store <16 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v16i32(<16 x i32> %x, ptr %ptr, <16 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v16i32: +; AVX: # %bb.0: +; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero +; AVX-NEXT: vpslld $31, %xmm4, %xmm4 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero +; AVX-NEXT: vpslld $31, %xmm4, %xmm4 +; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,1,1] +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX-NEXT: vmaskmovps %ymm0, %ymm2, (%rdi) +; AVX-NEXT: vmaskmovps %ymm1, %ymm3, 32(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: 
test_masked_store_success_v16i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero +; AVX2-NEXT: vpslld $31, %ymm3, %ymm3 +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero +; AVX2-NEXT: vpslld $31, %ymm2, %ymm2 +; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi) +; AVX2-NEXT: vpmaskmovd %ymm1, %ymm3, 32(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v16i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512-NEXT: vpmovb2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <16 x i32>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %load + store <16 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v32i8(<32 x i8> %x, ptr %ptr, <32 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v32i8: +; AVX: # %bb.0: +; AVX-NEXT: vpsllw $7, %xmm1, %xmm2 +; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1 +; AVX-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX-NEXT: vmovdqa (%rdi), %xmm4 +; AVX-NEXT: vmovdqa 16(%rdi), %xmm5 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm5, %xmm1 +; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm4, %xmm0 +; AVX-NEXT: vmovdqa %xmm0, (%rdi) +; AVX-NEXT: vmovdqa %xmm1, 16(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v32i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa (%rdi), %ymm2 +; AVX2-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vmovdqa %ymm0, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v32i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX512-NEXT: vpmovb2m %ymm1, %k1 +; AVX512-NEXT: vmovdqu8 %ymm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <32 x i8>, ptr %ptr, align 32 + %sel = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %load + store <32 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v32i16(<32 x i16> %x, ptr %ptr, <32 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v32i16: +; AVX: # %bb.0: +; AVX-NEXT: vmovdqa (%rdi), %xmm3 +; AVX-NEXT: vmovdqa 16(%rdi), %xmm4 +; AVX-NEXT: vmovdqa 32(%rdi), %xmm5 +; AVX-NEXT: vmovdqa 48(%rdi), %xmm6 +; AVX-NEXT: vextractf128 $1, %ymm2, %xmm7 +; AVX-NEXT: vpmovzxbw {{.*#+}} xmm8 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero +; AVX-NEXT: vpsllw $15, %xmm8, %xmm8 +; AVX-NEXT: vpsraw $15, %xmm8, %xmm8 +; AVX-NEXT: vpblendvb %xmm8, %xmm1, %xmm5, %xmm5 +; AVX-NEXT: vpmovzxbw {{.*#+}} xmm8 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX-NEXT: vpsllw $15, %xmm8, %xmm8 +; AVX-NEXT: vpsraw $15, %xmm8, %xmm8 +; AVX-NEXT: vpblendvb %xmm8, %xmm0, %xmm3, %xmm3 +; AVX-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX-NEXT: vpsllw $15, %xmm7, %xmm7 +; AVX-NEXT: vpsraw $15, %xmm7, %xmm7 +; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm7, %xmm1, 
%xmm6, %xmm1 +; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX-NEXT: vpsllw $15, %xmm2, %xmm2 +; AVX-NEXT: vpsraw $15, %xmm2, %xmm2 +; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm4, %xmm0 +; AVX-NEXT: vmovdqa %xmm3, (%rdi) +; AVX-NEXT: vmovdqa %xmm0, 16(%rdi) +; AVX-NEXT: vmovdqa %xmm5, 32(%rdi) +; AVX-NEXT: vmovdqa %xmm1, 48(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v32i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm3 +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm4 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm5 +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero +; AVX2-NEXT: vpsllw $15, %ymm5, %ymm5 +; AVX2-NEXT: vpsraw $15, %ymm5, %ymm5 +; AVX2-NEXT: vpblendvb %ymm5, %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero +; AVX2-NEXT: vpsllw $15, %ymm2, %ymm2 +; AVX2-NEXT: vpsraw $15, %ymm2, %ymm2 +; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm3, %ymm0 +; AVX2-NEXT: vmovdqa %ymm0, (%rdi) +; AVX2-NEXT: vmovdqa %ymm1, 32(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v32i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %ymm1, %ymm1 +; AVX512-NEXT: vpmovb2m %ymm1, %k1 +; AVX512-NEXT: vmovdqu16 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <32 x i16>, ptr %ptr, align 32 + %sel = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %load + store <32 x i16> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_v64i8(<64 x i8> %x, ptr %ptr, <64 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_v64i8: +; AVX: # %bb.0: +; AVX-NEXT: vmovd %esi, %xmm2 +; AVX-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $3, %r8d, %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX-NEXT: vpsllw $7, %xmm2, %xmm2 +; AVX-NEXT: vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $10, 
{{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX-NEXT: vpsllw $7, %xmm3, %xmm3 +; AVX-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX-NEXT: vpsllw $7, %xmm4, %xmm4 +; AVX-NEXT: vmovd {{.*#+}} xmm5 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm5, %xmm5 +; AVX-NEXT: vpsllw $7, %xmm5, %xmm5 +; AVX-NEXT: vextractf128 $1, %ymm1, %xmm6 +; AVX-NEXT: vmovdqa (%rdi), %xmm7 +; AVX-NEXT: vmovdqa 16(%rdi), %xmm8 +; AVX-NEXT: vmovdqa 32(%rdi), %xmm9 +; AVX-NEXT: vmovdqa 48(%rdi), %xmm10 +; AVX-NEXT: vpblendvb %xmm5, %xmm6, %xmm10, %xmm5 +; AVX-NEXT: vpblendvb %xmm4, %xmm1, %xmm9, %xmm1 +; AVX-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX-NEXT: vpblendvb %xmm3, %xmm4, %xmm8, %xmm3 +; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm7, %xmm0 +; AVX-NEXT: vmovdqa %xmm3, 16(%rdi) +; AVX-NEXT: vmovdqa %xmm1, 32(%rdi) +; AVX-NEXT: vmovdqa %xmm5, 48(%rdi) +; AVX-NEXT: vmovdqa %xmm0, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_v64i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; AVX2-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm2, %xmm2 
+; AVX2-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2 +; AVX2-NEXT: vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; AVX2-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 +; AVX2-NEXT: vpsllw $7, %ymm2, %ymm2 +; AVX2-NEXT: vmovd %esi, %xmm3 +; AVX2-NEXT: vpinsrb $1, %edx, %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $2, %ecx, %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $3, %r8d, %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $4, %r9d, %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm3, %xmm3 +; AVX2-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; AVX2-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm4, %xmm4 +; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm3 +; AVX2-NEXT: vpsllw $7, %ymm3, %ymm3 +; AVX2-NEXT: vmovdqa (%rdi), %ymm4 +; AVX2-NEXT: vpblendvb %ymm3, %ymm0, %ymm4, %ymm0 +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm3 +; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm3, %ymm1 +; AVX2-NEXT: vmovdqa %ymm1, 32(%rdi) +; AVX2-NEXT: vmovdqa %ymm0, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_v64i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %zmm1, 
%zmm1 +; AVX512-NEXT: vpmovb2m %zmm1, %k1 +; AVX512-NEXT: vmovdqu8 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <64 x i8>, ptr %ptr, align 32 + %sel = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %load + store <64 x i8> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v4i32(<4 x i32> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_invert_mask_v4i32: +; AVX: # %bb.0: +; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vmaskmovps %xmm0, %xmm1, (%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_invert_mask_v4i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmaskmovd %xmm0, %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_invert_mask_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k0 +; AVX512-NEXT: knotw %k0, %k1 +; AVX512-NEXT: vmovdqa32 %xmm0, (%rdi) {%k1} +; AVX512-NEXT: retq + %load = load <4 x i32>, ptr %ptr, align 32 + %sel = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> %x + store <4 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v8i32(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_invert_mask_v8i32: +; AVX: # %bb.0: +; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmaskmovps %ymm0, %ymm1, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_invert_mask_v8i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vpmaskmovd %ymm0, %ymm1, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_invert_mask_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k0 +; AVX512-NEXT: knotb %k0, %k1 +; AVX512-NEXT: vmovdqa32 %ymm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %load, <8 x i32> %x + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_success_invert_mask_v16i32(<16 x i32> %x, ptr %ptr, <16 x i1> %mask) { +; AVX-LABEL: test_masked_store_success_invert_mask_v16i32: +; AVX: # %bb.0: +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero +; AVX-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,1,1] +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero +; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3 +; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm4 = 
xmm2[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5 +; AVX-NEXT: vpxor %xmm5, %xmm2, %xmm2 +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpxor %xmm5, %xmm4, %xmm4 +; AVX-NEXT: vpslld $31, %xmm4, %xmm4 +; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2 +; AVX-NEXT: vmaskmovps %ymm1, %ymm2, 32(%rdi) +; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1 +; AVX-NEXT: vxorps %ymm1, %ymm3, %ymm1 +; AVX-NEXT: vpslld $31, %xmm1, %xmm2 +; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1 +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmaskmovps %ymm0, %ymm1, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_success_invert_mask_v16i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero +; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4 +; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm2 +; AVX2-NEXT: vpslld $31, %ymm2, %ymm2 +; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi) +; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm0 +; AVX2-NEXT: vpslld $31, %ymm0, %ymm0 +; AVX2-NEXT: vpmaskmovd %ymm1, %ymm0, 32(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_success_invert_mask_v16i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX512-NEXT: vpmovb2m %xmm1, %k0 +; AVX512-NEXT: knotw %k0, %k1 +; AVX512-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <16 x i32>, ptr %ptr, align 32 + %sel = select <16 x i1> %mask, <16 x i32> %load, <16 x i32> %x + store <16 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_zextload(<4 x i64> %x, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_zextload: +; AVX: # %bb.0: +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vpmovsxdq %xmm1, %xmm2 +; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero +; AVX-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero +; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 +; AVX-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vmovapd %ymm0, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_zextload: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1 +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX2-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vmovapd %ymm0, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_zextload: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero +; AVX512-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1} +; AVX512-NEXT: vmovdqa %ymm1, (%rdi) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <4 x i32>, ptr %ptr, align 32 + %zext 
= zext <4 x i32> %load to <4 x i64> + %masked = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %zext + store <4 x i64> %masked, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_volatile_load(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_volatile_load: +; AVX: # %bb.0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmovaps (%rdi), %ymm2 +; AVX-NEXT: vblendvps %ymm1, %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vmovaps %ymm0, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_volatile_load: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vmovaps (%rdi), %ymm2 +; AVX2-NEXT: vblendvps %ymm1, %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vmovaps %ymm0, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_volatile_load: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovdqa (%rdi), %ymm1 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} +; AVX512-NEXT: vmovdqa %ymm1, (%rdi) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load volatile <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +define void @test_masked_store_volatile_store(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_volatile_store: +; AVX: # %bb.0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmovaps (%rdi), %ymm2 +; AVX-NEXT: vblendvps %ymm1, %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vmovaps %ymm0, (%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_volatile_store: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vmovaps (%rdi), %ymm2 +; AVX2-NEXT: vblendvps %ymm1, %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vmovaps %ymm0, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_volatile_store: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovdqa (%rdi), %ymm1 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} +; AVX512-NEXT: vmovdqa %ymm1, (%rdi) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x i32>, ptr %ptr, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store volatile <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + +declare void @use_vec(<8 x i32>) + +define void @test_masked_store_intervening(<8 x i32> %x, ptr %ptr, <8 x i1> %mask) nounwind { +; AVX-LABEL: test_masked_store_intervening: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbx +; AVX-NEXT: subq $32, %rsp +; AVX-NEXT: movq %rdi, %rbx +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX-NEXT: 
vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmovaps (%rdi), %ymm2 +; AVX-NEXT: vblendvps %ymm1, %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill +; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vmovaps %ymm0, (%rdi) +; AVX-NEXT: callq use_vec@PLT +; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload +; AVX-NEXT: vmovaps %ymm0, (%rbx) +; AVX-NEXT: addq $32, %rsp +; AVX-NEXT: popq %rbx +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_intervening: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: subq $32, %rsp +; AVX2-NEXT: movq %rdi, %rbx +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vmovaps (%rdi), %ymm2 +; AVX2-NEXT: vblendvps %ymm1, %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill +; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vmovaps %ymm0, (%rdi) +; AVX2-NEXT: callq use_vec@PLT +; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload +; AVX2-NEXT: vmovaps %ymm0, (%rbx) +; AVX2-NEXT: addq $32, %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_intervening: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: subq $80, %rsp +; AVX512-NEXT: movq %rdi, %rbx +; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm0 +; AVX512-NEXT: vpmovw2m %xmm0, %k1 +; AVX512-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill +; AVX512-NEXT: vmovaps (%rdi), %ymm0 +; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovaps %ymm0, (%rdi) +; AVX512-NEXT: callq use_vec@PLT +; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX512-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload +; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} +; AVX512-NEXT: vmovdqa %ymm1, (%rbx) +; AVX512-NEXT: addq $80, %rsp +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x i32>, ptr %ptr, align 32 + store <8 x i32> zeroinitializer, ptr %ptr, align 32 + %tmp = load <8 x i32>, ptr %ptr + call void @use_vec(<8 x i32> %tmp) + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr, align 32 + ret void +} + + +define void @test_masked_store_multiple_v8i32(<8 x i32> %x, <8 x i32> %y, ptr %ptr1, ptr %ptr2, <8 x i1> %mask, <8 x i1> %mask2) { +; AVX-LABEL: test_masked_store_multiple_v8i32: +; AVX: # %bb.0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX-NEXT: vpslld $31, %xmm4, %xmm4 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero +; AVX-NEXT: vpslld $31, %xmm4, %xmm4 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX-NEXT: vmovaps (%rsi), %ymm4 +; AVX-NEXT: vblendvps %ymm3, %ymm1, %ymm4, %ymm1 +; AVX-NEXT: vmaskmovps %ymm0, %ymm2, (%rdi) +; 
AVX-NEXT: vmovaps %ymm1, (%rsi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_multiple_v8i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX2-NEXT: vpslld $31, %ymm2, %ymm2 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero +; AVX2-NEXT: vpslld $31, %ymm3, %ymm3 +; AVX2-NEXT: vmovaps (%rsi), %ymm4 +; AVX2-NEXT: vblendvps %ymm3, %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi) +; AVX2-NEXT: vmovaps %ymm1, (%rsi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_multiple_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm2, %xmm2 +; AVX512-NEXT: vpmovw2m %xmm2, %k1 +; AVX512-NEXT: vpsllw $15, %xmm3, %xmm2 +; AVX512-NEXT: vmovdqa (%rsi), %ymm3 +; AVX512-NEXT: vpmovw2m %xmm2, %k2 +; AVX512-NEXT: vmovdqa32 %ymm1, %ymm3 {%k2} +; AVX512-NEXT: vmovdqa32 %ymm0, (%rdi) {%k1} +; AVX512-NEXT: vmovdqa %ymm3, (%rsi) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x i32>, ptr %ptr1, align 32 + %load2 = load <8 x i32>, ptr %ptr2, align 32 + %sel = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %load + %sel2 = select <8 x i1> %mask2, <8 x i32> %y, <8 x i32> %load2 + store <8 x i32> %sel, ptr %ptr1, align 32 + store <8 x i32> %sel2, ptr %ptr2, align 32 + ret void +} + +define void @test_masked_store_multiple_v8i64(<8 x i64> %x, <8 x i64> %y, ptr %ptr1, ptr %ptr2, <8 x i1> %mask, <8 x i1> %mask2) { +; AVX-LABEL: test_masked_store_multiple_v8i64: +; AVX: # %bb.0: +; AVX-NEXT: vmovapd (%rsi), %ymm6 +; AVX-NEXT: vmovapd 32(%rsi), %ymm7 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm4[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm8, %xmm8 +; AVX-NEXT: vpmovsxdq %xmm8, %xmm9 +; AVX-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm8, %xmm8 +; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm9, %ymm8 +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero +; AVX-NEXT: vpslld $31, %xmm4, %xmm4 +; AVX-NEXT: vpmovsxdq %xmm4, %xmm9 +; AVX-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm4, %xmm4 +; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm9, %ymm4 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm9 = xmm5[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm9, %xmm9 +; AVX-NEXT: vpmovsxdq %xmm9, %xmm10 +; AVX-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm9, %xmm9 +; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm10, %ymm9 +; AVX-NEXT: vblendvpd %ymm9, %ymm3, %ymm7, %ymm3 +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero +; AVX-NEXT: vpslld $31, %xmm5, %xmm5 +; AVX-NEXT: vpmovsxdq %xmm5, %xmm7 +; AVX-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm5, %xmm5 +; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5 +; AVX-NEXT: vblendvpd %ymm5, %ymm2, %ymm6, %ymm2 +; AVX-NEXT: vmaskmovpd %ymm0, %ymm4, (%rdi) +; AVX-NEXT: vmaskmovpd %ymm1, %ymm8, 32(%rdi) +; AVX-NEXT: vmovapd %ymm3, 32(%rsi) +; AVX-NEXT: vmovapd %ymm2, (%rsi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_multiple_v8i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovapd (%rsi), %ymm6 +; AVX2-NEXT: vmovapd 32(%rsi), %ymm7 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm4[4,4,5,5,6,6,7,7] +; AVX2-NEXT: vpslld $31, %xmm8, %xmm8 +; AVX2-NEXT: vpmovsxdq %xmm8, %ymm8 +; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm4 = 
xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero +; AVX2-NEXT: vpslld $31, %xmm4, %xmm4 +; AVX2-NEXT: vpmovsxdq %xmm4, %ymm4 +; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm9 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero +; AVX2-NEXT: vpslld $31, %xmm9, %xmm9 +; AVX2-NEXT: vpmovsxdq %xmm9, %ymm9 +; AVX2-NEXT: vblendvpd %ymm9, %ymm2, %ymm6, %ymm2 +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7] +; AVX2-NEXT: vpslld $31, %xmm5, %xmm5 +; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5 +; AVX2-NEXT: vblendvpd %ymm5, %ymm3, %ymm7, %ymm3 +; AVX2-NEXT: vpmaskmovq %ymm0, %ymm4, (%rdi) +; AVX2-NEXT: vpmaskmovq %ymm1, %ymm8, 32(%rdi) +; AVX2-NEXT: vmovapd %ymm3, 32(%rsi) +; AVX2-NEXT: vmovapd %ymm2, (%rsi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_multiple_v8i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm2, %xmm2 +; AVX512-NEXT: vpmovw2m %xmm2, %k1 +; AVX512-NEXT: vpsllw $15, %xmm3, %xmm2 +; AVX512-NEXT: vmovdqu64 (%rsi), %zmm3 +; AVX512-NEXT: vpmovw2m %xmm2, %k2 +; AVX512-NEXT: vmovdqa64 %zmm1, %zmm3 {%k2} +; AVX512-NEXT: vmovdqu64 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vmovdqu64 %zmm3, (%rsi) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %load = load <8 x i64>, ptr %ptr1, align 32 + %load2 = load <8 x i64>, ptr %ptr2, align 32 + %sel = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %load + %sel2 = select <8 x i1> %mask2, <8 x i64> %y, <8 x i64> %load2 + store <8 x i64> %sel, ptr %ptr1, align 32 + store <8 x i64> %sel2, ptr %ptr2, align 32 + ret void +} + +define void @test_masked_store_unaligned_v4i32(<4 x i32> %data, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_unaligned_v4i32: +; AVX: # %bb.0: +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vmaskmovps %xmm0, %xmm1, 1(%rdi) +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_unaligned_v4i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmaskmovd %xmm0, %xmm1, 1(%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_unaligned_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu32 %xmm0, 1(%rdi) {%k1} +; AVX512-NEXT: retq + %ptr_i8 = getelementptr i8, ptr %ptr, i32 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <4 x i32>, ptr %ptr_vec, align 1 + %sel = select <4 x i1> %mask, <4 x i32> %data, <4 x i32> %load + store <4 x i32> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v4i64(<4 x i64> %data, ptr %ptr, <4 x i1> %mask) { +; AVX-LABEL: test_masked_store_unaligned_v4i64: +; AVX: # %bb.0: +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vpmovsxdq %xmm1, %xmm2 +; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmaskmovpd %ymm0, %ymm1, 1(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_unaligned_v4i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1 +; AVX2-NEXT: vpmaskmovq %ymm0, %ymm1, 1(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_unaligned_v4i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vpmovd2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu64 %ymm0, 1(%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %ptr_i8 = getelementptr i8, ptr %ptr, i64 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <4 x i64>, ptr %ptr_vec, align 1 + %sel = select <4 x i1> %mask, <4 x 
i64> %data, <4 x i64> %load + store <4 x i64> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v8i32(<8 x i32> %data, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_unaligned_v8i32: +; AVX: # %bb.0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX-NEXT: vmaskmovps %ymm0, %ymm1, 1(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_unaligned_v8i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vpmaskmovd %ymm0, %ymm1, 1(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_unaligned_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu32 %ymm0, 1(%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %ptr_i8 = getelementptr i8, ptr %ptr, i32 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <8 x i32>, ptr %ptr_vec, align 1 + %sel = select <8 x i1> %mask, <8 x i32> %data, <8 x i32> %load + store <8 x i32> %sel, ptr %ptr_vec, align 1 + ret void +} + +define void @test_masked_store_unaligned_v8i64(<8 x i64> %data, ptr %ptr, <8 x i1> %mask) { +; AVX-LABEL: test_masked_store_unaligned_v8i64: +; AVX: # %bb.0: +; AVX-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] +; AVX-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX-NEXT: vpmovsxdq %xmm3, %xmm4 +; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm3, %xmm3 +; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX-NEXT: vpmovsxdq %xmm2, %xmm4 +; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] +; AVX-NEXT: vpmovsxdq %xmm2, %xmm2 +; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 +; AVX-NEXT: vmaskmovpd %ymm0, %ymm2, 1(%rdi) +; AVX-NEXT: vmaskmovpd %ymm1, %ymm3, 33(%rdi) +; AVX-NEXT: vzeroupper +; AVX-NEXT: retq +; +; AVX2-LABEL: test_masked_store_unaligned_v8i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] +; AVX2-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3 +; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX2-NEXT: vpslld $31, %xmm2, %xmm2 +; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2 +; AVX2-NEXT: vpmaskmovq %ymm0, %ymm2, 1(%rdi) +; AVX2-NEXT: vpmaskmovq %ymm1, %ymm3, 33(%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: test_masked_store_unaligned_v8i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllw $15, %xmm1, %xmm1 +; AVX512-NEXT: vpmovw2m %xmm1, %k1 +; AVX512-NEXT: vmovdqu64 %zmm0, 1(%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %ptr_i8 = getelementptr i8, ptr %ptr, i64 1 + %ptr_vec = bitcast ptr %ptr_i8 to ptr + %load = load <8 x i64>, ptr %ptr_vec, align 1 + %sel = select <8 x i1> %mask, <8 x i64> %data, <8 x i64> %load + store <8 x i64> %sel, ptr %ptr_vec, align 1 + ret void +} diff --git a/llvm/test/CodeGen/X86/exp10-libcall-names.ll b/llvm/test/CodeGen/X86/exp10-libcall-names.ll index 96e3aae..2688474 100644 --- a/llvm/test/CodeGen/X86/exp10-libcall-names.ll +++ 
b/llvm/test/CodeGen/X86/exp10-libcall-names.ll @@ -13,10 +13,7 @@ ; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=GISEL-X86 ; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=GISEL-X64 -; RUN: not llc -mtriple=x86_64-apple-macos10.8 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s -; Check exp10/exp10f is emitted as __exp10/__exp10f on assorted systems. - -; ERR: no libcall available for fexp10 +; Check exp10/exp10f is emitted as __exp10/__exp10f on assorted darwin systems. define float @test_exp10_f32(float %x) nounwind { ; LINUX-LABEL: test_exp10_f32: @@ -78,43 +75,3 @@ define double @test_exp10_f64(double %x) nounwind { %ret = call double @llvm.exp10.f64(double %x) ret double %ret } - -define x86_fp80 @test_exp10_f80(x86_fp80 %x) nounwind { -; LINUX-LABEL: test_exp10_f80: -; LINUX: # %bb.0: -; LINUX-NEXT: subq $24, %rsp -; LINUX-NEXT: fldt {{[0-9]+}}(%rsp) -; LINUX-NEXT: fstpt (%rsp) -; LINUX-NEXT: callq exp10l@PLT -; LINUX-NEXT: addq $24, %rsp -; LINUX-NEXT: retq -; -; APPLE-LABEL: test_exp10_f80: -; APPLE: ## %bb.0: -; APPLE-NEXT: subq $24, %rsp -; APPLE-NEXT: fldt {{[0-9]+}}(%rsp) -; APPLE-NEXT: fstpt (%rsp) -; APPLE-NEXT: callq _exp10l -; APPLE-NEXT: addq $24, %rsp -; APPLE-NEXT: retq -; -; GISEL-X86-LABEL: test_exp10_f80: -; GISEL-X86: # %bb.0: -; GISEL-X86-NEXT: subl $12, %esp -; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) -; GISEL-X86-NEXT: fstpt (%esp) -; GISEL-X86-NEXT: calll exp10l -; GISEL-X86-NEXT: addl $12, %esp -; GISEL-X86-NEXT: retl -; -; GISEL-X64-LABEL: test_exp10_f80: -; GISEL-X64: # %bb.0: -; GISEL-X64-NEXT: subq $24, %rsp -; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) -; GISEL-X64-NEXT: fstpt (%rsp) -; GISEL-X64-NEXT: callq exp10l -; GISEL-X64-NEXT: addq $24, %rsp -; GISEL-X64-NEXT: retq - %ret = call x86_fp80 @llvm.exp10.f80(x86_fp80 %x) - ret x86_fp80 %ret -} diff --git a/llvm/test/CodeGen/X86/exp10l-libcall-names.ll b/llvm/test/CodeGen/X86/exp10l-libcall-names.ll new file mode 100644 index 0000000..2e7f9e3 --- /dev/null +++ b/llvm/test/CodeGen/X86/exp10l-libcall-names.ll @@ -0,0 +1,46 @@ +; RUN: llc -mtriple=x86_64-linux-gnu < %s | FileCheck -check-prefix=LINUX %s +; RUN: not llc -mtriple=x86_64-apple-macos10.9 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-ios9.0 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-tvos9.0 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-watchos9.0 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-xros9.0 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-ios8.0 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-tvos8.0 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-xros8.0 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-driverkit < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: not llc -mtriple=x86_64-apple-driverkit24.0 < %s 2>&1 | FileCheck -check-prefix=ERR %s +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=GISEL-X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=GISEL-X64 + +; ERR: no libcall available for fexp10 + +define x86_fp80 @test_exp10_f80(x86_fp80 %x) nounwind { +; LINUX-LABEL: test_exp10_f80: +; LINUX: # %bb.0: +; LINUX-NEXT: subq $24, %rsp +; 
LINUX-NEXT: fldt {{[0-9]+}}(%rsp) +; LINUX-NEXT: fstpt (%rsp) +; LINUX-NEXT: callq exp10l@PLT +; LINUX-NEXT: addq $24, %rsp +; LINUX-NEXT: retq +; +; GISEL-X86-LABEL: test_exp10_f80: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: subl $12, %esp +; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp) +; GISEL-X86-NEXT: fstpt (%esp) +; GISEL-X86-NEXT: calll exp10l +; GISEL-X86-NEXT: addl $12, %esp +; GISEL-X86-NEXT: retl +; +; GISEL-X64-LABEL: test_exp10_f80: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: subq $24, %rsp +; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fstpt (%rsp) +; GISEL-X64-NEXT: callq exp10l +; GISEL-X64-NEXT: addq $24, %rsp +; GISEL-X64-NEXT: retq + %ret = call x86_fp80 @llvm.exp10.f80(x86_fp80 %x) + ret x86_fp80 %ret +} diff --git a/llvm/test/CodeGen/X86/fmaddsub-combine.ll b/llvm/test/CodeGen/X86/fmaddsub-combine.ll index 5219ab3..2af219b 100644 --- a/llvm/test/CodeGen/X86/fmaddsub-combine.ll +++ b/llvm/test/CodeGen/X86/fmaddsub-combine.ll @@ -6,7 +6,7 @@ ; This test checks the fusing of MUL + ADDSUB to FMADDSUB. -define <2 x double> @mul_addsub_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 { +define <2 x double> @mul_addsub_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) { ; NOFMA-LABEL: mul_addsub_pd128: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulpd %xmm1, %xmm0, %xmm0 @@ -23,14 +23,14 @@ define <2 x double> @mul_addsub_pd128(<2 x double> %A, <2 x double> %B, <2 x do ; FMA4-NEXT: vfmaddsubpd {{.*#+}} xmm0 = (xmm0 * xmm1) +/- xmm2 ; FMA4-NEXT: retq entry: - %AB = fmul <2 x double> %A, %B - %Sub = fsub <2 x double> %AB, %C - %Add = fadd <2 x double> %AB, %C + %AB = fmul contract <2 x double> %A, %B + %Sub = fsub contract <2 x double> %AB, %C + %Add = fadd contract <2 x double> %AB, %C %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add, <2 x i32> <i32 0, i32 3> ret <2 x double> %Addsub } -define <4 x float> @mul_addsub_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 { +define <4 x float> @mul_addsub_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) { ; NOFMA-LABEL: mul_addsub_ps128: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 @@ -47,14 +47,14 @@ define <4 x float> @mul_addsub_ps128(<4 x float> %A, <4 x float> %B, <4 x float> ; FMA4-NEXT: vfmaddsubps {{.*#+}} xmm0 = (xmm0 * xmm1) +/- xmm2 ; FMA4-NEXT: retq entry: - %AB = fmul <4 x float> %A, %B - %Sub = fsub <4 x float> %AB, %C - %Add = fadd <4 x float> %AB, %C + %AB = fmul contract <4 x float> %A, %B + %Sub = fsub contract <4 x float> %AB, %C + %Add = fadd contract <4 x float> %AB, %C %Addsub = shufflevector <4 x float> %Sub, <4 x float> %Add, <4 x i32> <i32 0, i32 5, i32 2, i32 7> ret <4 x float> %Addsub } -define <4 x double> @mul_addsub_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 { +define <4 x double> @mul_addsub_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) { ; NOFMA-LABEL: mul_addsub_pd256: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulpd %ymm1, %ymm0, %ymm0 @@ -71,14 +71,14 @@ define <4 x double> @mul_addsub_pd256(<4 x double> %A, <4 x double> %B, <4 x dou ; FMA4-NEXT: vfmaddsubpd {{.*#+}} ymm0 = (ymm0 * ymm1) +/- ymm2 ; FMA4-NEXT: retq entry: - %AB = fmul <4 x double> %A, %B - %Sub = fsub <4 x double> %AB, %C - %Add = fadd <4 x double> %AB, %C + %AB = fmul contract <4 x double> %A, %B + %Sub = fsub contract <4 x double> %AB, %C + %Add = fadd contract <4 x double> %AB, %C %Addsub = shufflevector <4 x double> %Sub, <4 x double> %Add, <4 x i32> <i32 0, i32 5, i32 2, i32 7> ret <4 x double> %Addsub } -define <8 x float> 
@mul_addsub_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 { +define <8 x float> @mul_addsub_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) { ; NOFMA-LABEL: mul_addsub_ps256: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 @@ -95,14 +95,14 @@ define <8 x float> @mul_addsub_ps256(<8 x float> %A, <8 x float> %B, <8 x float> ; FMA4-NEXT: vfmaddsubps {{.*#+}} ymm0 = (ymm0 * ymm1) +/- ymm2 ; FMA4-NEXT: retq entry: - %AB = fmul <8 x float> %A, %B - %Sub = fsub <8 x float> %AB, %C - %Add = fadd <8 x float> %AB, %C + %AB = fmul contract <8 x float> %A, %B + %Sub = fsub contract <8 x float> %AB, %C + %Add = fadd contract <8 x float> %AB, %C %Addsub = shufflevector <8 x float> %Sub, <8 x float> %Add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15> ret <8 x float> %Addsub } -define <8 x double> @mul_addsub_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 { +define <8 x double> @mul_addsub_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) { ; NOFMA-LABEL: mul_addsub_pd512: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulpd %ymm3, %ymm1, %ymm1 @@ -128,14 +128,14 @@ define <8 x double> @mul_addsub_pd512(<8 x double> %A, <8 x double> %B, <8 x dou ; FMA4-NEXT: vfmaddsubpd {{.*#+}} ymm1 = (ymm1 * ymm3) +/- ymm5 ; FMA4-NEXT: retq entry: - %AB = fmul <8 x double> %A, %B - %Sub = fsub <8 x double> %AB, %C - %Add = fadd <8 x double> %AB, %C + %AB = fmul contract <8 x double> %A, %B + %Sub = fsub contract <8 x double> %AB, %C + %Add = fadd contract <8 x double> %AB, %C %Addsub = shufflevector <8 x double> %Sub, <8 x double> %Add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15> ret <8 x double> %Addsub } -define <16 x float> @mul_addsub_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 { +define <16 x float> @mul_addsub_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) { ; NOFMA-LABEL: mul_addsub_ps512: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulps %ymm3, %ymm1, %ymm1 @@ -161,14 +161,14 @@ define <16 x float> @mul_addsub_ps512(<16 x float> %A, <16 x float> %B, <16 x fl ; FMA4-NEXT: vfmaddsubps {{.*#+}} ymm1 = (ymm1 * ymm3) +/- ymm5 ; FMA4-NEXT: retq entry: - %AB = fmul <16 x float> %A, %B - %Sub = fsub <16 x float> %AB, %C - %Add = fadd <16 x float> %AB, %C + %AB = fmul contract <16 x float> %A, %B + %Sub = fsub contract <16 x float> %AB, %C + %Add = fadd contract <16 x float> %AB, %C %Addsub = shufflevector <16 x float> %Sub, <16 x float> %Add, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31> ret <16 x float> %Addsub } -define <4 x float> @buildvector_mul_addsub_ps128(<4 x float> %C, <4 x float> %D, <4 x float> %B) #0 { +define <4 x float> @buildvector_mul_addsub_ps128(<4 x float> %C, <4 x float> %D, <4 x float> %B) { ; NOFMA-LABEL: buildvector_mul_addsub_ps128: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 @@ -185,19 +185,19 @@ define <4 x float> @buildvector_mul_addsub_ps128(<4 x float> %C, <4 x float> %D, ; FMA4-NEXT: vfmaddsubps {{.*#+}} xmm0 = (xmm0 * xmm1) +/- xmm2 ; FMA4-NEXT: retq bb: - %A = fmul <4 x float> %C, %D + %A = fmul contract <4 x float> %C, %D %A0 = extractelement <4 x float> %A, i32 0 %B0 = extractelement <4 x float> %B, i32 0 - %sub0 = fsub float %A0, %B0 + %sub0 = fsub contract float %A0, %B0 %A2 = extractelement <4 x float> %A, i32 2 %B2 = extractelement <4 x float> %B, i32 2 - %sub2 = fsub float %A2, %B2 + %sub2 = fsub contract float %A2, %B2 %A1 = extractelement <4 x 
float> %A, i32 1 %B1 = extractelement <4 x float> %B, i32 1 - %add1 = fadd float %A1, %B1 + %add1 = fadd contract float %A1, %B1 %A3 = extractelement <4 x float> %A, i32 3 %B3 = extractelement <4 x float> %B, i32 3 - %add3 = fadd float %A3, %B3 + %add3 = fadd contract float %A3, %B3 %vecinsert1 = insertelement <4 x float> undef, float %sub0, i32 0 %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add1, i32 1 %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub2, i32 2 @@ -205,7 +205,7 @@ bb: ret <4 x float> %vecinsert4 } -define <2 x double> @buildvector_mul_addsub_pd128(<2 x double> %C, <2 x double> %D, <2 x double> %B) #0 { +define <2 x double> @buildvector_mul_addsub_pd128(<2 x double> %C, <2 x double> %D, <2 x double> %B) { ; NOFMA-LABEL: buildvector_mul_addsub_pd128: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulpd %xmm1, %xmm0, %xmm0 @@ -222,19 +222,19 @@ define <2 x double> @buildvector_mul_addsub_pd128(<2 x double> %C, <2 x double> ; FMA4-NEXT: vfmaddsubpd {{.*#+}} xmm0 = (xmm0 * xmm1) +/- xmm2 ; FMA4-NEXT: retq bb: - %A = fmul <2 x double> %C, %D + %A = fmul contract <2 x double> %C, %D %A0 = extractelement <2 x double> %A, i32 0 %B0 = extractelement <2 x double> %B, i32 0 - %sub0 = fsub double %A0, %B0 + %sub0 = fsub contract double %A0, %B0 %A1 = extractelement <2 x double> %A, i32 1 %B1 = extractelement <2 x double> %B, i32 1 - %add1 = fadd double %A1, %B1 + %add1 = fadd contract double %A1, %B1 %vecinsert1 = insertelement <2 x double> undef, double %sub0, i32 0 %vecinsert2 = insertelement <2 x double> %vecinsert1, double %add1, i32 1 ret <2 x double> %vecinsert2 } -define <8 x float> @buildvector_mul_addsub_ps256(<8 x float> %C, <8 x float> %D, <8 x float> %B) #0 { +define <8 x float> @buildvector_mul_addsub_ps256(<8 x float> %C, <8 x float> %D, <8 x float> %B) { ; NOFMA-LABEL: buildvector_mul_addsub_ps256: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 @@ -251,31 +251,31 @@ define <8 x float> @buildvector_mul_addsub_ps256(<8 x float> %C, <8 x float> %D, ; FMA4-NEXT: vfmaddsubps {{.*#+}} ymm0 = (ymm0 * ymm1) +/- ymm2 ; FMA4-NEXT: retq bb: - %A = fmul <8 x float> %C, %D + %A = fmul contract <8 x float> %C, %D %A0 = extractelement <8 x float> %A, i32 0 %B0 = extractelement <8 x float> %B, i32 0 - %sub0 = fsub float %A0, %B0 + %sub0 = fsub contract float %A0, %B0 %A2 = extractelement <8 x float> %A, i32 2 %B2 = extractelement <8 x float> %B, i32 2 - %sub2 = fsub float %A2, %B2 + %sub2 = fsub contract float %A2, %B2 %A4 = extractelement <8 x float> %A, i32 4 %B4 = extractelement <8 x float> %B, i32 4 - %sub4 = fsub float %A4, %B4 + %sub4 = fsub contract float %A4, %B4 %A6 = extractelement <8 x float> %A, i32 6 %B6 = extractelement <8 x float> %B, i32 6 - %sub6 = fsub float %A6, %B6 + %sub6 = fsub contract float %A6, %B6 %A1 = extractelement <8 x float> %A, i32 1 %B1 = extractelement <8 x float> %B, i32 1 - %add1 = fadd float %A1, %B1 + %add1 = fadd contract float %A1, %B1 %A3 = extractelement <8 x float> %A, i32 3 %B3 = extractelement <8 x float> %B, i32 3 - %add3 = fadd float %A3, %B3 + %add3 = fadd contract float %A3, %B3 %A5 = extractelement <8 x float> %A, i32 5 %B5 = extractelement <8 x float> %B, i32 5 - %add5 = fadd float %A5, %B5 + %add5 = fadd contract float %A5, %B5 %A7 = extractelement <8 x float> %A, i32 7 %B7 = extractelement <8 x float> %B, i32 7 - %add7 = fadd float %A7, %B7 + %add7 = fadd contract float %A7, %B7 %vecinsert1 = insertelement <8 x float> undef, float %sub0, i32 0 %vecinsert2 = insertelement <8 x float> %vecinsert1, 
float %add1, i32 1 %vecinsert3 = insertelement <8 x float> %vecinsert2, float %sub2, i32 2 @@ -287,7 +287,7 @@ bb: ret <8 x float> %vecinsert8 } -define <4 x double> @buildvector_mul_addsub_pd256(<4 x double> %C, <4 x double> %D, <4 x double> %B) #0 { +define <4 x double> @buildvector_mul_addsub_pd256(<4 x double> %C, <4 x double> %D, <4 x double> %B) { ; NOFMA-LABEL: buildvector_mul_addsub_pd256: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulpd %ymm1, %ymm0, %ymm0 @@ -304,19 +304,19 @@ define <4 x double> @buildvector_mul_addsub_pd256(<4 x double> %C, <4 x double> ; FMA4-NEXT: vfmaddsubpd {{.*#+}} ymm0 = (ymm0 * ymm1) +/- ymm2 ; FMA4-NEXT: retq bb: - %A = fmul <4 x double> %C, %D + %A = fmul contract <4 x double> %C, %D %A0 = extractelement <4 x double> %A, i32 0 %B0 = extractelement <4 x double> %B, i32 0 - %sub0 = fsub double %A0, %B0 + %sub0 = fsub contract double %A0, %B0 %A2 = extractelement <4 x double> %A, i32 2 %B2 = extractelement <4 x double> %B, i32 2 - %sub2 = fsub double %A2, %B2 + %sub2 = fsub contract double %A2, %B2 %A1 = extractelement <4 x double> %A, i32 1 %B1 = extractelement <4 x double> %B, i32 1 - %add1 = fadd double %A1, %B1 + %add1 = fadd contract double %A1, %B1 %A3 = extractelement <4 x double> %A, i32 3 %B3 = extractelement <4 x double> %B, i32 3 - %add3 = fadd double %A3, %B3 + %add3 = fadd contract double %A3, %B3 %vecinsert1 = insertelement <4 x double> undef, double %sub0, i32 0 %vecinsert2 = insertelement <4 x double> %vecinsert1, double %add1, i32 1 %vecinsert3 = insertelement <4 x double> %vecinsert2, double %sub2, i32 2 @@ -324,7 +324,7 @@ bb: ret <4 x double> %vecinsert4 } -define <16 x float> @buildvector_mul_addsub_ps512(<16 x float> %C, <16 x float> %D, <16 x float> %B) #0 { +define <16 x float> @buildvector_mul_addsub_ps512(<16 x float> %C, <16 x float> %D, <16 x float> %B) { ; NOFMA-LABEL: buildvector_mul_addsub_ps512: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulps %ymm3, %ymm1, %ymm1 @@ -350,55 +350,55 @@ define <16 x float> @buildvector_mul_addsub_ps512(<16 x float> %C, <16 x float> ; FMA4-NEXT: vfmaddsubps {{.*#+}} ymm1 = (ymm1 * ymm3) +/- ymm5 ; FMA4-NEXT: retq bb: - %A = fmul <16 x float> %C, %D + %A = fmul contract <16 x float> %C, %D %A0 = extractelement <16 x float> %A, i32 0 %B0 = extractelement <16 x float> %B, i32 0 - %sub0 = fsub float %A0, %B0 + %sub0 = fsub contract float %A0, %B0 %A2 = extractelement <16 x float> %A, i32 2 %B2 = extractelement <16 x float> %B, i32 2 - %sub2 = fsub float %A2, %B2 + %sub2 = fsub contract float %A2, %B2 %A4 = extractelement <16 x float> %A, i32 4 %B4 = extractelement <16 x float> %B, i32 4 - %sub4 = fsub float %A4, %B4 + %sub4 = fsub contract float %A4, %B4 %A6 = extractelement <16 x float> %A, i32 6 %B6 = extractelement <16 x float> %B, i32 6 - %sub6 = fsub float %A6, %B6 + %sub6 = fsub contract float %A6, %B6 %A8 = extractelement <16 x float> %A, i32 8 %B8 = extractelement <16 x float> %B, i32 8 - %sub8 = fsub float %A8, %B8 + %sub8 = fsub contract float %A8, %B8 %A10 = extractelement <16 x float> %A, i32 10 %B10 = extractelement <16 x float> %B, i32 10 - %sub10 = fsub float %A10, %B10 + %sub10 = fsub contract float %A10, %B10 %A12 = extractelement <16 x float> %A, i32 12 %B12 = extractelement <16 x float> %B, i32 12 - %sub12 = fsub float %A12, %B12 + %sub12 = fsub contract float %A12, %B12 %A14 = extractelement <16 x float> %A, i32 14 %B14 = extractelement <16 x float> %B, i32 14 - %sub14 = fsub float %A14, %B14 + %sub14 = fsub contract float %A14, %B14 %A1 = extractelement <16 x float> %A, i32 1 %B1 = 
extractelement <16 x float> %B, i32 1 - %add1 = fadd float %A1, %B1 + %add1 = fadd contract float %A1, %B1 %A3 = extractelement <16 x float> %A, i32 3 %B3 = extractelement <16 x float> %B, i32 3 - %add3 = fadd float %A3, %B3 + %add3 = fadd contract float %A3, %B3 %A5 = extractelement <16 x float> %A, i32 5 %B5 = extractelement <16 x float> %B, i32 5 - %add5 = fadd float %A5, %B5 + %add5 = fadd contract float %A5, %B5 %A7 = extractelement <16 x float> %A, i32 7 %B7 = extractelement <16 x float> %B, i32 7 - %add7 = fadd float %A7, %B7 + %add7 = fadd contract float %A7, %B7 %A9 = extractelement <16 x float> %A, i32 9 %B9 = extractelement <16 x float> %B, i32 9 - %add9 = fadd float %A9, %B9 + %add9 = fadd contract float %A9, %B9 %A11 = extractelement <16 x float> %A, i32 11 %B11 = extractelement <16 x float> %B, i32 11 - %add11 = fadd float %A11, %B11 + %add11 = fadd contract float %A11, %B11 %A13 = extractelement <16 x float> %A, i32 13 %B13 = extractelement <16 x float> %B, i32 13 - %add13 = fadd float %A13, %B13 + %add13 = fadd contract float %A13, %B13 %A15 = extractelement <16 x float> %A, i32 15 %B15 = extractelement <16 x float> %B, i32 15 - %add15 = fadd float %A15, %B15 + %add15 = fadd contract float %A15, %B15 %vecinsert1 = insertelement <16 x float> undef, float %sub0, i32 0 %vecinsert2 = insertelement <16 x float> %vecinsert1, float %add1, i32 1 %vecinsert3 = insertelement <16 x float> %vecinsert2, float %sub2, i32 2 @@ -418,7 +418,7 @@ bb: ret <16 x float> %vecinsert16 } -define <8 x double> @buildvector_mul_addsub_pd512(<8 x double> %C, <8 x double> %D, <8 x double> %B) #0 { +define <8 x double> @buildvector_mul_addsub_pd512(<8 x double> %C, <8 x double> %D, <8 x double> %B) { ; NOFMA-LABEL: buildvector_mul_addsub_pd512: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulpd %ymm3, %ymm1, %ymm1 @@ -444,28 +444,28 @@ define <8 x double> @buildvector_mul_addsub_pd512(<8 x double> %C, <8 x double> ; FMA4-NEXT: vfmaddsubpd {{.*#+}} ymm1 = (ymm1 * ymm3) +/- ymm5 ; FMA4-NEXT: retq bb: - %A = fmul <8 x double> %C, %D + %A = fmul contract <8 x double> %C, %D %A0 = extractelement <8 x double> %A, i32 0 %B0 = extractelement <8 x double> %B, i32 0 - %sub0 = fsub double %A0, %B0 + %sub0 = fsub contract double %A0, %B0 %A2 = extractelement <8 x double> %A, i32 2 %B2 = extractelement <8 x double> %B, i32 2 - %sub2 = fsub double %A2, %B2 + %sub2 = fsub contract double %A2, %B2 %A4 = extractelement <8 x double> %A, i32 4 %B4 = extractelement <8 x double> %B, i32 4 - %sub4 = fsub double %A4, %B4 + %sub4 = fsub contract double %A4, %B4 %A6 = extractelement <8 x double> %A, i32 6 %B6 = extractelement <8 x double> %B, i32 6 - %sub6 = fsub double %A6, %B6 + %sub6 = fsub contract double %A6, %B6 %A1 = extractelement <8 x double> %A, i32 1 %B1 = extractelement <8 x double> %B, i32 1 - %add1 = fadd double %A1, %B1 + %add1 = fadd contract double %A1, %B1 %A3 = extractelement <8 x double> %A, i32 3 %B3 = extractelement <8 x double> %B, i32 3 - %add3 = fadd double %A3, %B3 + %add3 = fadd contract double %A3, %B3 %A7 = extractelement <8 x double> %A, i32 7 %B7 = extractelement <8 x double> %B, i32 7 - %add7 = fadd double %A7, %B7 + %add7 = fadd contract double %A7, %B7 %vecinsert1 = insertelement <8 x double> undef, double %sub0, i32 0 %vecinsert2 = insertelement <8 x double> %vecinsert1, double %add1, i32 1 %vecinsert3 = insertelement <8 x double> %vecinsert2, double %sub2, i32 2 @@ -477,7 +477,7 @@ bb: ret <8 x double> %vecinsert8 } -define <4 x float> @buildvector_mul_subadd_ps128(<4 x float> %C, <4 x float> %D, 
<4 x float> %B) #0 { +define <4 x float> @buildvector_mul_subadd_ps128(<4 x float> %C, <4 x float> %D, <4 x float> %B) { ; NOFMA-LABEL: buildvector_mul_subadd_ps128: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 @@ -506,19 +506,19 @@ define <4 x float> @buildvector_mul_subadd_ps128(<4 x float> %C, <4 x float> %D, ; FMA4-NEXT: vfmsubaddps {{.*#+}} xmm0 = (xmm0 * xmm1) -/+ xmm2 ; FMA4-NEXT: retq bb: - %A = fmul <4 x float> %C, %D + %A = fmul contract <4 x float> %C, %D %A0 = extractelement <4 x float> %A, i32 0 %B0 = extractelement <4 x float> %B, i32 0 - %sub0 = fadd float %A0, %B0 + %sub0 = fadd contract float %A0, %B0 %A2 = extractelement <4 x float> %A, i32 2 %B2 = extractelement <4 x float> %B, i32 2 - %sub2 = fadd float %A2, %B2 + %sub2 = fadd contract float %A2, %B2 %A1 = extractelement <4 x float> %A, i32 1 %B1 = extractelement <4 x float> %B, i32 1 - %add1 = fsub float %A1, %B1 + %add1 = fsub contract float %A1, %B1 %A3 = extractelement <4 x float> %A, i32 3 %B3 = extractelement <4 x float> %B, i32 3 - %add3 = fsub float %A3, %B3 + %add3 = fsub contract float %A3, %B3 %vecinsert1 = insertelement <4 x float> undef, float %sub0, i32 0 %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add1, i32 1 %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub2, i32 2 @@ -526,7 +526,7 @@ bb: ret <4 x float> %vecinsert4 } -define <2 x double> @buildvector_mul_subadd_pd128(<2 x double> %C, <2 x double> %D, <2 x double> %B) #0 { +define <2 x double> @buildvector_mul_subadd_pd128(<2 x double> %C, <2 x double> %D, <2 x double> %B) { ; NOFMA-LABEL: buildvector_mul_subadd_pd128: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulpd %xmm1, %xmm0, %xmm0 @@ -547,19 +547,19 @@ define <2 x double> @buildvector_mul_subadd_pd128(<2 x double> %C, <2 x double> ; FMA4-NEXT: vfmsubaddpd {{.*#+}} xmm0 = (xmm0 * xmm1) -/+ xmm2 ; FMA4-NEXT: retq bb: - %A = fmul <2 x double> %C, %D + %A = fmul contract <2 x double> %C, %D %A0 = extractelement <2 x double> %A, i32 0 %B0 = extractelement <2 x double> %B, i32 0 - %sub0 = fadd double %A0, %B0 + %sub0 = fadd contract double %A0, %B0 %A1 = extractelement <2 x double> %A, i32 1 %B1 = extractelement <2 x double> %B, i32 1 - %add1 = fsub double %A1, %B1 + %add1 = fsub contract double %A1, %B1 %vecinsert1 = insertelement <2 x double> undef, double %sub0, i32 0 %vecinsert2 = insertelement <2 x double> %vecinsert1, double %add1, i32 1 ret <2 x double> %vecinsert2 } -define <8 x float> @buildvector_mul_subadd_ps256(<8 x float> %C, <8 x float> %D, <8 x float> %B) #0 { +define <8 x float> @buildvector_mul_subadd_ps256(<8 x float> %C, <8 x float> %D, <8 x float> %B) { ; NOFMA-LABEL: buildvector_mul_subadd_ps256: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 @@ -604,31 +604,31 @@ define <8 x float> @buildvector_mul_subadd_ps256(<8 x float> %C, <8 x float> %D, ; FMA4-NEXT: vfmsubaddps {{.*#+}} ymm0 = (ymm0 * ymm1) -/+ ymm2 ; FMA4-NEXT: retq bb: - %A = fmul <8 x float> %C, %D + %A = fmul contract <8 x float> %C, %D %A0 = extractelement <8 x float> %A, i32 0 %B0 = extractelement <8 x float> %B, i32 0 - %sub0 = fadd float %A0, %B0 + %sub0 = fadd contract float %A0, %B0 %A2 = extractelement <8 x float> %A, i32 2 %B2 = extractelement <8 x float> %B, i32 2 - %sub2 = fadd float %A2, %B2 + %sub2 = fadd contract float %A2, %B2 %A4 = extractelement <8 x float> %A, i32 4 %B4 = extractelement <8 x float> %B, i32 4 - %sub4 = fadd float %A4, %B4 + %sub4 = fadd contract float %A4, %B4 %A6 = extractelement <8 x float> %A, i32 6 %B6 = extractelement <8 x 
float> %B, i32 6 - %sub6 = fadd float %A6, %B6 + %sub6 = fadd contract float %A6, %B6 %A1 = extractelement <8 x float> %A, i32 1 %B1 = extractelement <8 x float> %B, i32 1 - %add1 = fsub float %A1, %B1 + %add1 = fsub contract float %A1, %B1 %A3 = extractelement <8 x float> %A, i32 3 %B3 = extractelement <8 x float> %B, i32 3 - %add3 = fsub float %A3, %B3 + %add3 = fsub contract float %A3, %B3 %A5 = extractelement <8 x float> %A, i32 5 %B5 = extractelement <8 x float> %B, i32 5 - %add5 = fsub float %A5, %B5 + %add5 = fsub contract float %A5, %B5 %A7 = extractelement <8 x float> %A, i32 7 %B7 = extractelement <8 x float> %B, i32 7 - %add7 = fsub float %A7, %B7 + %add7 = fsub contract float %A7, %B7 %vecinsert1 = insertelement <8 x float> undef, float %sub0, i32 0 %vecinsert2 = insertelement <8 x float> %vecinsert1, float %add1, i32 1 %vecinsert3 = insertelement <8 x float> %vecinsert2, float %sub2, i32 2 @@ -640,7 +640,7 @@ bb: ret <8 x float> %vecinsert8 } -define <4 x double> @buildvector_mul_subadd_pd256(<4 x double> %C, <4 x double> %D, <4 x double> %B) #0 { +define <4 x double> @buildvector_mul_subadd_pd256(<4 x double> %C, <4 x double> %D, <4 x double> %B) { ; NOFMA-LABEL: buildvector_mul_subadd_pd256: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulpd %ymm1, %ymm0, %ymm0 @@ -669,19 +669,19 @@ define <4 x double> @buildvector_mul_subadd_pd256(<4 x double> %C, <4 x double> ; FMA4-NEXT: vfmsubaddpd {{.*#+}} ymm0 = (ymm0 * ymm1) -/+ ymm2 ; FMA4-NEXT: retq bb: - %A = fmul <4 x double> %C, %D + %A = fmul contract <4 x double> %C, %D %A0 = extractelement <4 x double> %A, i32 0 %B0 = extractelement <4 x double> %B, i32 0 - %sub0 = fadd double %A0, %B0 + %sub0 = fadd contract double %A0, %B0 %A2 = extractelement <4 x double> %A, i32 2 %B2 = extractelement <4 x double> %B, i32 2 - %sub2 = fadd double %A2, %B2 + %sub2 = fadd contract double %A2, %B2 %A1 = extractelement <4 x double> %A, i32 1 %B1 = extractelement <4 x double> %B, i32 1 - %add1 = fsub double %A1, %B1 + %add1 = fsub contract double %A1, %B1 %A3 = extractelement <4 x double> %A, i32 3 %B3 = extractelement <4 x double> %B, i32 3 - %add3 = fsub double %A3, %B3 + %add3 = fsub contract double %A3, %B3 %vecinsert1 = insertelement <4 x double> undef, double %sub0, i32 0 %vecinsert2 = insertelement <4 x double> %vecinsert1, double %add1, i32 1 %vecinsert3 = insertelement <4 x double> %vecinsert2, double %sub2, i32 2 @@ -689,7 +689,7 @@ bb: ret <4 x double> %vecinsert4 } -define <16 x float> @buildvector_mul_subadd_ps512(<16 x float> %C, <16 x float> %D, <16 x float> %B) #0 { +define <16 x float> @buildvector_mul_subadd_ps512(<16 x float> %C, <16 x float> %D, <16 x float> %B) { ; NOFMA-LABEL: buildvector_mul_subadd_ps512: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulps %ymm3, %ymm1, %ymm1 @@ -765,55 +765,55 @@ define <16 x float> @buildvector_mul_subadd_ps512(<16 x float> %C, <16 x float> ; FMA4-NEXT: vfmsubaddps {{.*#+}} ymm1 = (ymm1 * ymm3) -/+ ymm5 ; FMA4-NEXT: retq bb: - %A = fmul <16 x float> %C, %D + %A = fmul contract <16 x float> %C, %D %A0 = extractelement <16 x float> %A, i32 0 %B0 = extractelement <16 x float> %B, i32 0 - %sub0 = fadd float %A0, %B0 + %sub0 = fadd contract float %A0, %B0 %A2 = extractelement <16 x float> %A, i32 2 %B2 = extractelement <16 x float> %B, i32 2 - %sub2 = fadd float %A2, %B2 + %sub2 = fadd contract float %A2, %B2 %A4 = extractelement <16 x float> %A, i32 4 %B4 = extractelement <16 x float> %B, i32 4 - %sub4 = fadd float %A4, %B4 + %sub4 = fadd contract float %A4, %B4 %A6 = extractelement <16 x float> %A, i32 
6 %B6 = extractelement <16 x float> %B, i32 6 - %sub6 = fadd float %A6, %B6 + %sub6 = fadd contract float %A6, %B6 %A8 = extractelement <16 x float> %A, i32 8 %B8 = extractelement <16 x float> %B, i32 8 - %sub8 = fadd float %A8, %B8 + %sub8 = fadd contract float %A8, %B8 %A10 = extractelement <16 x float> %A, i32 10 %B10 = extractelement <16 x float> %B, i32 10 - %sub10 = fadd float %A10, %B10 + %sub10 = fadd contract float %A10, %B10 %A12 = extractelement <16 x float> %A, i32 12 %B12 = extractelement <16 x float> %B, i32 12 - %sub12 = fadd float %A12, %B12 + %sub12 = fadd contract float %A12, %B12 %A14 = extractelement <16 x float> %A, i32 14 %B14 = extractelement <16 x float> %B, i32 14 - %sub14 = fadd float %A14, %B14 + %sub14 = fadd contract float %A14, %B14 %A1 = extractelement <16 x float> %A, i32 1 %B1 = extractelement <16 x float> %B, i32 1 - %add1 = fsub float %A1, %B1 + %add1 = fsub contract float %A1, %B1 %A3 = extractelement <16 x float> %A, i32 3 %B3 = extractelement <16 x float> %B, i32 3 - %add3 = fsub float %A3, %B3 + %add3 = fsub contract float %A3, %B3 %A5 = extractelement <16 x float> %A, i32 5 %B5 = extractelement <16 x float> %B, i32 5 - %add5 = fsub float %A5, %B5 + %add5 = fsub contract float %A5, %B5 %A7 = extractelement <16 x float> %A, i32 7 %B7 = extractelement <16 x float> %B, i32 7 - %add7 = fsub float %A7, %B7 + %add7 = fsub contract float %A7, %B7 %A9 = extractelement <16 x float> %A, i32 9 %B9 = extractelement <16 x float> %B, i32 9 - %add9 = fsub float %A9, %B9 + %add9 = fsub contract float %A9, %B9 %A11 = extractelement <16 x float> %A, i32 11 %B11 = extractelement <16 x float> %B, i32 11 - %add11 = fsub float %A11, %B11 + %add11 = fsub contract float %A11, %B11 %A13 = extractelement <16 x float> %A, i32 13 %B13 = extractelement <16 x float> %B, i32 13 - %add13 = fsub float %A13, %B13 + %add13 = fsub contract float %A13, %B13 %A15 = extractelement <16 x float> %A, i32 15 %B15 = extractelement <16 x float> %B, i32 15 - %add15 = fsub float %A15, %B15 + %add15 = fsub contract float %A15, %B15 %vecinsert1 = insertelement <16 x float> undef, float %sub0, i32 0 %vecinsert2 = insertelement <16 x float> %vecinsert1, float %add1, i32 1 %vecinsert3 = insertelement <16 x float> %vecinsert2, float %sub2, i32 2 @@ -833,7 +833,7 @@ bb: ret <16 x float> %vecinsert16 } -define <8 x double> @buildvector_mul_subadd_pd512(<8 x double> %C, <8 x double> %D, <8 x double> %B) #0 { +define <8 x double> @buildvector_mul_subadd_pd512(<8 x double> %C, <8 x double> %D, <8 x double> %B) { ; NOFMA-LABEL: buildvector_mul_subadd_pd512: ; NOFMA: # %bb.0: # %bb ; NOFMA-NEXT: vmulpd %ymm3, %ymm1, %ymm1 @@ -879,28 +879,28 @@ define <8 x double> @buildvector_mul_subadd_pd512(<8 x double> %C, <8 x double> ; FMA4-NEXT: vfmsubaddpd {{.*#+}} ymm1 = (ymm1 * ymm3) -/+ ymm5 ; FMA4-NEXT: retq bb: - %A = fmul <8 x double> %C, %D + %A = fmul contract <8 x double> %C, %D %A0 = extractelement <8 x double> %A, i32 0 %B0 = extractelement <8 x double> %B, i32 0 - %sub0 = fadd double %A0, %B0 + %sub0 = fadd contract double %A0, %B0 %A2 = extractelement <8 x double> %A, i32 2 %B2 = extractelement <8 x double> %B, i32 2 - %sub2 = fadd double %A2, %B2 + %sub2 = fadd contract double %A2, %B2 %A4 = extractelement <8 x double> %A, i32 4 %B4 = extractelement <8 x double> %B, i32 4 - %sub4 = fadd double %A4, %B4 + %sub4 = fadd contract double %A4, %B4 %A6 = extractelement <8 x double> %A, i32 6 %B6 = extractelement <8 x double> %B, i32 6 - %sub6 = fadd double %A6, %B6 + %sub6 = fadd contract double %A6, %B6 %A1 = 
extractelement <8 x double> %A, i32 1 %B1 = extractelement <8 x double> %B, i32 1 - %add1 = fsub double %A1, %B1 + %add1 = fsub contract double %A1, %B1 %A3 = extractelement <8 x double> %A, i32 3 %B3 = extractelement <8 x double> %B, i32 3 - %add3 = fsub double %A3, %B3 + %add3 = fsub contract double %A3, %B3 %A7 = extractelement <8 x double> %A, i32 7 %B7 = extractelement <8 x double> %B, i32 7 - %add7 = fsub double %A7, %B7 + %add7 = fsub contract double %A7, %B7 %vecinsert1 = insertelement <8 x double> undef, double %sub0, i32 0 %vecinsert2 = insertelement <8 x double> %vecinsert1, double %add1, i32 1 %vecinsert3 = insertelement <8 x double> %vecinsert2, double %sub2, i32 2 @@ -911,5 +911,3 @@ bb: %vecinsert8 = insertelement <8 x double> %vecinsert7, double %add7, i32 7 ret <8 x double> %vecinsert8 } - -attributes #0 = { nounwind "unsafe-fp-math"="true" } diff --git a/llvm/test/CodeGen/X86/fmsubadd-combine.ll b/llvm/test/CodeGen/X86/fmsubadd-combine.ll index 674a1d5..3f562dd 100644 --- a/llvm/test/CodeGen/X86/fmsubadd-combine.ll +++ b/llvm/test/CodeGen/X86/fmsubadd-combine.ll @@ -6,7 +6,7 @@ ; This test checks the fusing of MUL + SUB/ADD to FMSUBADD. -define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 { +define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) { ; NOFMA-LABEL: mul_subadd_pd128: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulpd %xmm1, %xmm0, %xmm0 @@ -25,14 +25,14 @@ define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x dou ; FMA4-NEXT: vfmsubaddpd {{.*#+}} xmm0 = (xmm0 * xmm1) -/+ xmm2 ; FMA4-NEXT: retq entry: - %AB = fmul <2 x double> %A, %B - %Sub = fsub <2 x double> %AB, %C - %Add = fadd <2 x double> %AB, %C + %AB = fmul contract<2 x double> %A, %B + %Sub = fsub contract<2 x double> %AB, %C + %Add = fadd contract<2 x double> %AB, %C %subadd = shufflevector <2 x double> %Add, <2 x double> %Sub, <2 x i32> <i32 0, i32 3> ret <2 x double> %subadd } -define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 { +define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) { ; NOFMA-LABEL: mul_subadd_ps128: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 @@ -51,14 +51,14 @@ define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> ; FMA4-NEXT: vfmsubaddps {{.*#+}} xmm0 = (xmm0 * xmm1) -/+ xmm2 ; FMA4-NEXT: retq entry: - %AB = fmul <4 x float> %A, %B - %Sub = fsub <4 x float> %AB, %C - %Add = fadd <4 x float> %AB, %C + %AB = fmul contract <4 x float> %A, %B + %Sub = fsub contract <4 x float> %AB, %C + %Add = fadd contract <4 x float> %AB, %C %subadd = shufflevector <4 x float> %Add, <4 x float> %Sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7> ret <4 x float> %subadd } -define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 { +define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) { ; NOFMA-LABEL: mul_subadd_pd256: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulpd %ymm1, %ymm0, %ymm0 @@ -77,14 +77,14 @@ define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x dou ; FMA4-NEXT: vfmsubaddpd {{.*#+}} ymm0 = (ymm0 * ymm1) -/+ ymm2 ; FMA4-NEXT: retq entry: - %AB = fmul <4 x double> %A, %B - %Sub = fsub <4 x double> %AB, %C - %Add = fadd <4 x double> %AB, %C + %AB = fmul contract <4 x double> %A, %B + %Sub = fsub contract <4 x double> %AB, %C + %Add = fadd contract <4 x double> %AB, %C %subadd = shufflevector <4 
x double> %Add, <4 x double> %Sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7> ret <4 x double> %subadd } -define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 { +define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) { ; NOFMA-LABEL: mul_subadd_ps256: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 @@ -103,14 +103,14 @@ define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> ; FMA4-NEXT: vfmsubaddps {{.*#+}} ymm0 = (ymm0 * ymm1) -/+ ymm2 ; FMA4-NEXT: retq entry: - %AB = fmul <8 x float> %A, %B - %Sub = fsub <8 x float> %AB, %C - %Add = fadd <8 x float> %AB, %C + %AB = fmul contract <8 x float> %A, %B + %Sub = fsub contract <8 x float> %AB, %C + %Add = fadd contract <8 x float> %AB, %C %subadd = shufflevector <8 x float> %Add, <8 x float> %Sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15> ret <8 x float> %subadd } -define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 { +define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) { ; NOFMA-LABEL: mul_subadd_pd512: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulpd %ymm2, %ymm0, %ymm0 @@ -140,14 +140,14 @@ define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x dou ; FMA4-NEXT: vfmsubaddpd {{.*#+}} ymm1 = (ymm1 * ymm3) -/+ ymm5 ; FMA4-NEXT: retq entry: - %AB = fmul <8 x double> %A, %B - %Sub = fsub <8 x double> %AB, %C - %Add = fadd <8 x double> %AB, %C + %AB = fmul contract <8 x double> %A, %B + %Sub = fsub contract <8 x double> %AB, %C + %Add = fadd contract <8 x double> %AB, %C %subadd = shufflevector <8 x double> %Add, <8 x double> %Sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15> ret <8 x double> %subadd } -define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 { +define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) { ; NOFMA-LABEL: mul_subadd_ps512: ; NOFMA: # %bb.0: # %entry ; NOFMA-NEXT: vmulps %ymm2, %ymm0, %ymm0 @@ -177,15 +177,15 @@ define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x fl ; FMA4-NEXT: vfmsubaddps {{.*#+}} ymm1 = (ymm1 * ymm3) -/+ ymm5 ; FMA4-NEXT: retq entry: - %AB = fmul <16 x float> %A, %B - %Sub = fsub <16 x float> %AB, %C - %Add = fadd <16 x float> %AB, %C + %AB = fmul contract <16 x float> %A, %B + %Sub = fsub contract <16 x float> %AB, %C + %Add = fadd contract <16 x float> %AB, %C %subadd = shufflevector <16 x float> %Add, <16 x float> %Sub, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31> ret <16 x float> %subadd } ; This should not be matched to fmsubadd because the mul is on the wrong side of the fsub. 
-define <2 x double> @mul_subadd_bad_commute(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 { +define <2 x double> @mul_subadd_bad_commute(<2 x double> %A, <2 x double> %B, <2 x double> %C) { ; CHECK-LABEL: mul_subadd_bad_commute: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmulpd %xmm1, %xmm0, %xmm0 @@ -194,11 +194,9 @@ define <2 x double> @mul_subadd_bad_commute(<2 x double> %A, <2 x double> %B, <2 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1] ; CHECK-NEXT: retq entry: - %AB = fmul <2 x double> %A, %B + %AB = fmul contract <2 x double> %A, %B %Sub = fsub <2 x double> %C, %AB %Add = fadd <2 x double> %AB, %C %subadd = shufflevector <2 x double> %Add, <2 x double> %Sub, <2 x i32> <i32 0, i32 3> ret <2 x double> %subadd } - -attributes #0 = { nounwind "unsafe-fp-math"="true" } diff --git a/llvm/test/CodeGen/X86/freeze-binary.ll b/llvm/test/CodeGen/X86/freeze-binary.ll index 189de05..962ffe4 100644 --- a/llvm/test/CodeGen/X86/freeze-binary.ll +++ b/llvm/test/CodeGen/X86/freeze-binary.ll @@ -490,18 +490,19 @@ define i32 @freeze_ashr_exact(i32 %a0) nounwind { define i32 @freeze_ashr_exact_extra_use(i32 %a0, ptr %escape) nounwind { ; X86-LABEL: freeze_ashr_exact_extra_use: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: sarl $3, %eax -; X86-NEXT: movl %eax, (%ecx) +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: sarl $3, %ecx +; X86-NEXT: movl %ecx, (%eax) +; X86-NEXT: movl %ecx, %eax ; X86-NEXT: sarl $6, %eax ; X86-NEXT: retl ; ; X64-LABEL: freeze_ashr_exact_extra_use: ; X64: # %bb.0: +; X64-NEXT: sarl $3, %edi +; X64-NEXT: movl %edi, (%rsi) ; X64-NEXT: movl %edi, %eax -; X64-NEXT: sarl $3, %eax -; X64-NEXT: movl %eax, (%rsi) ; X64-NEXT: sarl $6, %eax ; X64-NEXT: retq %x = ashr exact i32 %a0, 3 @@ -603,18 +604,19 @@ define i32 @freeze_lshr_exact(i32 %a0) nounwind { define i32 @freeze_lshr_exact_extra_use(i32 %a0, ptr %escape) nounwind { ; X86-LABEL: freeze_lshr_exact_extra_use: ; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: shrl $3, %eax -; X86-NEXT: movl %eax, (%ecx) +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: shrl $3, %ecx +; X86-NEXT: movl %ecx, (%eax) +; X86-NEXT: movl %ecx, %eax ; X86-NEXT: shrl $5, %eax ; X86-NEXT: retl ; ; X64-LABEL: freeze_lshr_exact_extra_use: ; X64: # %bb.0: +; X64-NEXT: shrl $3, %edi +; X64-NEXT: movl %edi, (%rsi) ; X64-NEXT: movl %edi, %eax -; X64-NEXT: shrl $3, %eax -; X64-NEXT: movl %eax, (%rsi) ; X64-NEXT: shrl $5, %eax ; X64-NEXT: retq %x = lshr exact i32 %a0, 3 diff --git a/llvm/test/CodeGen/X86/freeze-vector.ll b/llvm/test/CodeGen/X86/freeze-vector.ll index 953a5e7..15b43c4 100644 --- a/llvm/test/CodeGen/X86/freeze-vector.ll +++ b/llvm/test/CodeGen/X86/freeze-vector.ll @@ -600,8 +600,8 @@ define void @freeze_buildvector_extrause(ptr %origin0, ptr %origin1, ptr %origin ; X86-NEXT: vpinsrd $1, (%edi), %xmm0, %xmm0 ; X86-NEXT: vpinsrd $2, (%esi), %xmm0, %xmm0 ; X86-NEXT: vpinsrd $3, (%edx), %xmm0, %xmm0 -; X86-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 -; X86-NEXT: vmovdqa %xmm0, (%ecx) +; X86-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 +; X86-NEXT: vmovdqa %xmm1, (%ecx) ; X86-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-NEXT: vmovdqa %xmm0, (%eax) ; X86-NEXT: popl %esi @@ -616,8 +616,8 @@ define void @freeze_buildvector_extrause(ptr %origin0, ptr %origin1, ptr %origin ; X64-NEXT: vpinsrd $2, (%rdx), %xmm0, %xmm0 ; X64-NEXT: vpinsrd $3, (%rcx), %xmm0, %xmm0 ; X64-NEXT: vpbroadcastd 
{{.*#+}} xmm1 = [15,15,15,15] -; X64-NEXT: vpand %xmm1, %xmm0, %xmm0 -; X64-NEXT: vmovdqa %xmm0, (%r9) +; X64-NEXT: vpand %xmm1, %xmm0, %xmm1 +; X64-NEXT: vmovdqa %xmm1, (%r9) ; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7] ; X64-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-NEXT: vmovdqa %xmm0, (%r8) diff --git a/llvm/test/CodeGen/X86/knownbits-hadd-hsub.ll b/llvm/test/CodeGen/X86/knownbits-hadd-hsub.ll index 6376b4d..f3bb334 100644 --- a/llvm/test/CodeGen/X86/knownbits-hadd-hsub.ll +++ b/llvm/test/CodeGen/X86/knownbits-hadd-hsub.ll @@ -4,7 +4,14 @@ define <4 x i32> @hadd_select_v4i32(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: hadd_select_v4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm2 = [3,3,3,3] +; CHECK-NEXT: vpand %xmm2, %xmm0, %xmm0 +; CHECK-NEXT: vpand %xmm2, %xmm1, %xmm1 +; CHECK-NEXT: vphaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [9,9,9,9] +; CHECK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1 +; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1 +; CHECK-NEXT: vpand %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: retq entry: %and1 = and <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3> @@ -73,7 +80,15 @@ entry: define <4 x i32> @hsub_select_shl_v4i32(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: hsub_select_shl_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm2 = [65535,65535,65535,65535] +; CHECK-NEXT: vpor %xmm2, %xmm0, %xmm0 +; CHECK-NEXT: vpor %xmm2, %xmm1, %xmm1 +; CHECK-NEXT: vphsubd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpslld $16, %xmm0, %xmm1 +; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm2 = [9,9,9,9] +; CHECK-NEXT: vpmaxud %xmm2, %xmm1, %xmm2 +; CHECK-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1 +; CHECK-NEXT: vpand %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: retq %or1 = or <4 x i32> %x, <i32 65535, i32 65535, i32 65535, i32 65535> %or2 = or <4 x i32> %y, <i32 65535, i32 65535, i32 65535, i32 65535> diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll index 3a4a638..fb2433d 100644 --- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll +++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll @@ -730,36 +730,36 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vmovdqa (%rdi), %xmm2 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3 -; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm4 -; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm5 +; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm4 ; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0 -; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0 -; AVX1-NEXT: vpsubq %xmm0, %xmm5, %xmm0 +; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm0, %xmm4, %xmm0 +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm5 ; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpsubq %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vpsubq %xmm1, %xmm5, %xmm1 ; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm6 ; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm7 ; AVX1-NEXT: vpsrlq $33, %xmm0, %xmm0 ; AVX1-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1] -; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm9 +; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm9 ; AVX1-NEXT: vpmuludq %xmm0, %xmm9, %xmm0 -; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5 -; AVX1-NEXT: vpmuludq %xmm5, %xmm7, %xmm5 -; AVX1-NEXT: vpaddq %xmm0, %xmm5, %xmm0 +; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm4 +; AVX1-NEXT: vpmuludq %xmm4, %xmm7, %xmm4 +; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm0 ; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0 -; AVX1-NEXT: vpmuludq %xmm7, 
%xmm9, %xmm5 +; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm4 ; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm7 +; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm7 ; AVX1-NEXT: vpmuludq %xmm7, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm4 -; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm4 -; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5 +; AVX1-NEXT: vpmuludq %xmm5, %xmm6, %xmm5 +; AVX1-NEXT: vpaddq %xmm1, %xmm5, %xmm1 ; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm4 -; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm5 +; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3 ; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2 +; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2 ; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -767,20 +767,20 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin ; AVX2-LABEL: vec256_i64_signed_mem_reg: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm1 -; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2 -; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1] -; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm3 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1] +; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm3 +; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0 +; AVX2-NEXT: vpsubq %ymm0, %ymm3, %ymm0 ; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm4 ; AVX2-NEXT: vpsrlq $33, %ymm0, %ymm0 -; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm0 -; AVX2-NEXT: vpsrlq $32, %ymm2, %ymm2 -; AVX2-NEXT: vpmuludq %ymm2, %ymm4, %ymm2 -; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpsrlq $32, %ymm3, %ymm3 +; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vpaddq %ymm0, %ymm3, %ymm0 ; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0 -; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm2 +; AVX2-NEXT: vpmuludq %ymm2, %ymm4, %ymm2 ; AVX2-NEXT: vpaddq %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: retq @@ -790,36 +790,36 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin ; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1 ; XOP-NEXT: vmovdqa (%rdi), %xmm2 ; XOP-NEXT: vmovdqa 16(%rdi), %xmm3 -; XOP-NEXT: vpcomgtq %xmm1, %xmm3, %xmm4 -; XOP-NEXT: vpcomgtq %xmm0, %xmm2, %xmm5 +; XOP-NEXT: vpcomgtq %xmm0, %xmm2, %xmm4 ; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm0 -; XOP-NEXT: vpxor %xmm5, %xmm0, %xmm0 -; XOP-NEXT: vpsubq %xmm0, %xmm5, %xmm0 +; XOP-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; XOP-NEXT: vpsubq %xmm0, %xmm4, %xmm0 +; XOP-NEXT: vpcomgtq %xmm1, %xmm3, %xmm5 ; XOP-NEXT: vpsubq %xmm1, %xmm3, %xmm1 -; XOP-NEXT: vpxor %xmm4, %xmm1, %xmm1 -; XOP-NEXT: vpsubq %xmm1, %xmm4, %xmm1 +; XOP-NEXT: vpxor %xmm5, %xmm1, %xmm1 +; XOP-NEXT: vpsubq %xmm1, %xmm5, %xmm1 ; XOP-NEXT: vpsrlq $1, %xmm1, %xmm6 ; XOP-NEXT: vpsrlq $1, %xmm0, %xmm7 ; XOP-NEXT: vpsrlq $33, %xmm0, %xmm0 ; XOP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1] -; XOP-NEXT: vpor %xmm5, %xmm8, %xmm9 +; XOP-NEXT: vpor %xmm4, %xmm8, %xmm9 ; XOP-NEXT: vpmuludq %xmm0, %xmm9, %xmm0 -; XOP-NEXT: vpsrlq $32, %xmm5, %xmm5 -; XOP-NEXT: vpmuludq %xmm5, %xmm7, %xmm5 -; XOP-NEXT: vpaddq %xmm0, %xmm5, %xmm0 +; XOP-NEXT: vpsrlq $32, %xmm4, %xmm4 +; XOP-NEXT: vpmuludq %xmm4, %xmm7, %xmm4 +; XOP-NEXT: vpaddq %xmm0, %xmm4, %xmm0 ; XOP-NEXT: vpsllq $32, %xmm0, %xmm0 -; XOP-NEXT: vpmuludq %xmm7, %xmm9, 
%xmm5 +; XOP-NEXT: vpmuludq %xmm7, %xmm9, %xmm4 ; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1 -; XOP-NEXT: vpor %xmm4, %xmm8, %xmm7 +; XOP-NEXT: vpor %xmm5, %xmm8, %xmm7 ; XOP-NEXT: vpmuludq %xmm7, %xmm1, %xmm1 -; XOP-NEXT: vpsrlq $32, %xmm4, %xmm4 -; XOP-NEXT: vpmuludq %xmm4, %xmm6, %xmm4 -; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1 +; XOP-NEXT: vpsrlq $32, %xmm5, %xmm5 +; XOP-NEXT: vpmuludq %xmm5, %xmm6, %xmm5 +; XOP-NEXT: vpaddq %xmm1, %xmm5, %xmm1 ; XOP-NEXT: vpsllq $32, %xmm1, %xmm1 -; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm4 -; XOP-NEXT: vpaddq %xmm3, %xmm4, %xmm3 +; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm5 +; XOP-NEXT: vpaddq %xmm3, %xmm5, %xmm3 ; XOP-NEXT: vpaddq %xmm1, %xmm3, %xmm1 -; XOP-NEXT: vpaddq %xmm2, %xmm5, %xmm2 +; XOP-NEXT: vpaddq %xmm2, %xmm4, %xmm2 ; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; XOP-NEXT: retq @@ -897,101 +897,101 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwind { ; AVX1-LABEL: vec256_i64_signed_reg_mem: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rdi), %xmm1 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4 -; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm5 -; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1 -; AVX1-NEXT: vpsubq %xmm1, %xmm5, %xmm1 -; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vmovdqa (%rdi), %xmm2 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3 +; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm4 +; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm2 ; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2 -; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm6 -; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm7 -; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1 -; AVX1-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1] -; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm9 -; AVX1-NEXT: vpmuludq %xmm1, %xmm9, %xmm1 -; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5 -; AVX1-NEXT: vpmuludq %xmm5, %xmm7, %xmm5 -; AVX1-NEXT: vpaddq %xmm1, %xmm5, %xmm1 -; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm5 +; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm5 +; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm3 +; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vpsubq %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm6 +; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm7 ; AVX1-NEXT: vpsrlq $33, %xmm2, %xmm2 -; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm7 -; AVX1-NEXT: vpmuludq %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1] +; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm9 +; AVX1-NEXT: vpmuludq %xmm2, %xmm9, %xmm2 ; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm4 -; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm4 +; AVX1-NEXT: vpmuludq %xmm4, %xmm7, %xmm4 ; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2 ; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2 -; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm4 -; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3 -; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpaddq %xmm0, %xmm5, %xmm0 -; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm4 +; AVX1-NEXT: vpsrlq $33, %xmm3, %xmm3 +; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm7 +; AVX1-NEXT: vpmuludq %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5 +; AVX1-NEXT: vpmuludq %xmm5, %xmm6, %xmm5 +; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3 +; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm5 +; AVX1-NEXT: vpaddq %xmm1, 
%xmm5, %xmm1 +; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm0 +; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: vec256_i64_signed_reg_mem: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm1 -; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2 -; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1] -; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm3 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1] +; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm3 +; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm1 -; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1 -; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vpsubq %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm4 ; AVX2-NEXT: vpsrlq $33, %ymm1, %ymm1 -; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vpsrlq $32, %ymm2, %ymm2 -; AVX2-NEXT: vpmuludq %ymm2, %ymm4, %ymm2 -; AVX2-NEXT: vpaddq %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vpmuludq %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpsrlq $32, %ymm3, %ymm3 +; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vpaddq %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpsllq $32, %ymm1, %ymm1 -; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm2 +; AVX2-NEXT: vpmuludq %ymm2, %ymm4, %ymm2 ; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; XOP-LABEL: vec256_i64_signed_reg_mem: ; XOP: # %bb.0: -; XOP-NEXT: vmovdqa (%rdi), %xmm1 -; XOP-NEXT: vmovdqa 16(%rdi), %xmm2 -; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3 -; XOP-NEXT: vpcomgtq %xmm2, %xmm3, %xmm4 -; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm5 -; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1 -; XOP-NEXT: vpxor %xmm5, %xmm1, %xmm1 -; XOP-NEXT: vpsubq %xmm1, %xmm5, %xmm1 -; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm2 +; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1 +; XOP-NEXT: vmovdqa (%rdi), %xmm2 +; XOP-NEXT: vmovdqa 16(%rdi), %xmm3 +; XOP-NEXT: vpcomgtq %xmm2, %xmm0, %xmm4 +; XOP-NEXT: vpsubq %xmm2, %xmm0, %xmm2 ; XOP-NEXT: vpxor %xmm4, %xmm2, %xmm2 ; XOP-NEXT: vpsubq %xmm2, %xmm4, %xmm2 -; XOP-NEXT: vpsrlq $1, %xmm2, %xmm6 -; XOP-NEXT: vpsrlq $1, %xmm1, %xmm7 -; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1 -; XOP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1] -; XOP-NEXT: vpor %xmm5, %xmm8, %xmm9 -; XOP-NEXT: vpmuludq %xmm1, %xmm9, %xmm1 -; XOP-NEXT: vpsrlq $32, %xmm5, %xmm5 -; XOP-NEXT: vpmuludq %xmm5, %xmm7, %xmm5 -; XOP-NEXT: vpaddq %xmm1, %xmm5, %xmm1 -; XOP-NEXT: vpsllq $32, %xmm1, %xmm1 -; XOP-NEXT: vpmuludq %xmm7, %xmm9, %xmm5 +; XOP-NEXT: vpcomgtq %xmm3, %xmm1, %xmm5 +; XOP-NEXT: vpsubq %xmm3, %xmm1, %xmm3 +; XOP-NEXT: vpxor %xmm5, %xmm3, %xmm3 +; XOP-NEXT: vpsubq %xmm3, %xmm5, %xmm3 +; XOP-NEXT: vpsrlq $1, %xmm3, %xmm6 +; XOP-NEXT: vpsrlq $1, %xmm2, %xmm7 ; XOP-NEXT: vpsrlq $33, %xmm2, %xmm2 -; XOP-NEXT: vpor %xmm4, %xmm8, %xmm7 -; XOP-NEXT: vpmuludq %xmm7, %xmm2, %xmm2 +; XOP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1] +; XOP-NEXT: vpor %xmm4, %xmm8, %xmm9 +; XOP-NEXT: vpmuludq %xmm2, %xmm9, %xmm2 ; XOP-NEXT: vpsrlq $32, %xmm4, %xmm4 -; XOP-NEXT: vpmuludq %xmm4, %xmm6, %xmm4 +; XOP-NEXT: vpmuludq %xmm4, %xmm7, %xmm4 ; XOP-NEXT: vpaddq %xmm2, %xmm4, %xmm2 ; XOP-NEXT: vpsllq $32, %xmm2, %xmm2 -; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm4 -; XOP-NEXT: vpaddq %xmm3, %xmm4, %xmm3 -; XOP-NEXT: vpaddq %xmm2, %xmm3, %xmm2 -; XOP-NEXT: vpaddq %xmm0, %xmm5, %xmm0 -; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0 -; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; XOP-NEXT: vpmuludq %xmm7, %xmm9, %xmm4 +; XOP-NEXT: vpsrlq $33, %xmm3, %xmm3 +; XOP-NEXT: vpor 
%xmm5, %xmm8, %xmm7 +; XOP-NEXT: vpmuludq %xmm7, %xmm3, %xmm3 +; XOP-NEXT: vpsrlq $32, %xmm5, %xmm5 +; XOP-NEXT: vpmuludq %xmm5, %xmm6, %xmm5 +; XOP-NEXT: vpaddq %xmm3, %xmm5, %xmm3 +; XOP-NEXT: vpsllq $32, %xmm3, %xmm3 +; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm5 +; XOP-NEXT: vpaddq %xmm1, %xmm5, %xmm1 +; XOP-NEXT: vpaddq %xmm3, %xmm1, %xmm1 +; XOP-NEXT: vpaddq %xmm0, %xmm4, %xmm0 +; XOP-NEXT: vpaddq %xmm2, %xmm0, %xmm0 +; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; XOP-NEXT: retq ; ; AVX512F-LABEL: vec256_i64_signed_reg_mem: @@ -1071,36 +1071,36 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 ; AVX1-NEXT: vmovdqa (%rdi), %xmm2 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3 -; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm4 -; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm5 +; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm4 ; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0 -; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0 -; AVX1-NEXT: vpsubq %xmm0, %xmm5, %xmm0 +; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpsubq %xmm0, %xmm4, %xmm0 +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm5 ; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpsubq %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vpsubq %xmm1, %xmm5, %xmm1 ; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm6 ; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm7 ; AVX1-NEXT: vpsrlq $33, %xmm0, %xmm0 ; AVX1-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1] -; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm9 +; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm9 ; AVX1-NEXT: vpmuludq %xmm0, %xmm9, %xmm0 -; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5 -; AVX1-NEXT: vpmuludq %xmm5, %xmm7, %xmm5 -; AVX1-NEXT: vpaddq %xmm0, %xmm5, %xmm0 +; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm4 +; AVX1-NEXT: vpmuludq %xmm4, %xmm7, %xmm4 +; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm0 ; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0 -; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm5 +; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm4 ; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm7 +; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm7 ; AVX1-NEXT: vpmuludq %xmm7, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm4 -; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm4 -; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5 +; AVX1-NEXT: vpmuludq %xmm5, %xmm6, %xmm5 +; AVX1-NEXT: vpaddq %xmm1, %xmm5, %xmm1 ; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1 -; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm4 -; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm5 +; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3 ; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2 +; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2 ; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -1109,20 +1109,20 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-NEXT: vmovdqa (%rsi), %ymm1 -; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2 -; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1] -; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm3 +; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1] +; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm3 +; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm1 -; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1 -; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vpsubq %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm4 ; AVX2-NEXT: vpsrlq $33, %ymm1, %ymm1 -; 
AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vpsrlq $32, %ymm2, %ymm2 -; AVX2-NEXT: vpmuludq %ymm2, %ymm4, %ymm2 -; AVX2-NEXT: vpaddq %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vpmuludq %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpsrlq $32, %ymm3, %ymm3 +; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vpaddq %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpsllq $32, %ymm1, %ymm1 -; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm2 +; AVX2-NEXT: vpmuludq %ymm2, %ymm4, %ymm2 ; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq @@ -1133,36 +1133,36 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; XOP-NEXT: vmovdqa 16(%rsi), %xmm1 ; XOP-NEXT: vmovdqa (%rdi), %xmm2 ; XOP-NEXT: vmovdqa 16(%rdi), %xmm3 -; XOP-NEXT: vpcomgtq %xmm1, %xmm3, %xmm4 -; XOP-NEXT: vpcomgtq %xmm0, %xmm2, %xmm5 +; XOP-NEXT: vpcomgtq %xmm0, %xmm2, %xmm4 ; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm0 -; XOP-NEXT: vpxor %xmm5, %xmm0, %xmm0 -; XOP-NEXT: vpsubq %xmm0, %xmm5, %xmm0 +; XOP-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; XOP-NEXT: vpsubq %xmm0, %xmm4, %xmm0 +; XOP-NEXT: vpcomgtq %xmm1, %xmm3, %xmm5 ; XOP-NEXT: vpsubq %xmm1, %xmm3, %xmm1 -; XOP-NEXT: vpxor %xmm4, %xmm1, %xmm1 -; XOP-NEXT: vpsubq %xmm1, %xmm4, %xmm1 +; XOP-NEXT: vpxor %xmm5, %xmm1, %xmm1 +; XOP-NEXT: vpsubq %xmm1, %xmm5, %xmm1 ; XOP-NEXT: vpsrlq $1, %xmm1, %xmm6 ; XOP-NEXT: vpsrlq $1, %xmm0, %xmm7 ; XOP-NEXT: vpsrlq $33, %xmm0, %xmm0 ; XOP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1] -; XOP-NEXT: vpor %xmm5, %xmm8, %xmm9 +; XOP-NEXT: vpor %xmm4, %xmm8, %xmm9 ; XOP-NEXT: vpmuludq %xmm0, %xmm9, %xmm0 -; XOP-NEXT: vpsrlq $32, %xmm5, %xmm5 -; XOP-NEXT: vpmuludq %xmm5, %xmm7, %xmm5 -; XOP-NEXT: vpaddq %xmm0, %xmm5, %xmm0 +; XOP-NEXT: vpsrlq $32, %xmm4, %xmm4 +; XOP-NEXT: vpmuludq %xmm4, %xmm7, %xmm4 +; XOP-NEXT: vpaddq %xmm0, %xmm4, %xmm0 ; XOP-NEXT: vpsllq $32, %xmm0, %xmm0 -; XOP-NEXT: vpmuludq %xmm7, %xmm9, %xmm5 +; XOP-NEXT: vpmuludq %xmm7, %xmm9, %xmm4 ; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1 -; XOP-NEXT: vpor %xmm4, %xmm8, %xmm7 +; XOP-NEXT: vpor %xmm5, %xmm8, %xmm7 ; XOP-NEXT: vpmuludq %xmm7, %xmm1, %xmm1 -; XOP-NEXT: vpsrlq $32, %xmm4, %xmm4 -; XOP-NEXT: vpmuludq %xmm4, %xmm6, %xmm4 -; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1 +; XOP-NEXT: vpsrlq $32, %xmm5, %xmm5 +; XOP-NEXT: vpmuludq %xmm5, %xmm6, %xmm5 +; XOP-NEXT: vpaddq %xmm1, %xmm5, %xmm1 ; XOP-NEXT: vpsllq $32, %xmm1, %xmm1 -; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm4 -; XOP-NEXT: vpaddq %xmm3, %xmm4, %xmm3 +; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm5 +; XOP-NEXT: vpaddq %xmm3, %xmm5, %xmm3 ; XOP-NEXT: vpaddq %xmm1, %xmm3, %xmm1 -; XOP-NEXT: vpaddq %xmm2, %xmm5, %xmm2 +; XOP-NEXT: vpaddq %xmm2, %xmm4, %xmm2 ; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; XOP-NEXT: retq @@ -1627,27 +1627,27 @@ define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounw define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounwind { ; AVX1-LABEL: vec256_i16_signed_reg_mem: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rdi), %xmm1 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm4 -; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm5 -; AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm6 -; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vpsubw %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm6 -; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vmovdqa (%rdi), %xmm2 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3 
+; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm4 +; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm5 +; AVX1-NEXT: vpminsw %xmm2, %xmm0, %xmm6 +; AVX1-NEXT: vpmaxsw %xmm2, %xmm0, %xmm2 ; AVX1-NEXT: vpsubw %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpminsw %xmm3, %xmm1, %xmm6 +; AVX1-NEXT: vpmaxsw %xmm3, %xmm1, %xmm3 +; AVX1-NEXT: vpsubw %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 ; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1] ; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpaddw %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: vec256_i16_signed_reg_mem: @@ -1665,25 +1665,25 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounw ; ; XOP-LABEL: vec256_i16_signed_reg_mem: ; XOP: # %bb.0: -; XOP-NEXT: vmovdqa (%rdi), %xmm1 -; XOP-NEXT: vmovdqa 16(%rdi), %xmm2 -; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3 -; XOP-NEXT: vpcomgtw %xmm2, %xmm3, %xmm4 -; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm5 -; XOP-NEXT: vpminsw %xmm2, %xmm3, %xmm6 -; XOP-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2 +; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1 +; XOP-NEXT: vmovdqa (%rdi), %xmm2 +; XOP-NEXT: vmovdqa 16(%rdi), %xmm3 +; XOP-NEXT: vpcomgtw %xmm3, %xmm1, %xmm4 +; XOP-NEXT: vpcomgtw %xmm2, %xmm0, %xmm5 +; XOP-NEXT: vpminsw %xmm3, %xmm1, %xmm6 +; XOP-NEXT: vpmaxsw %xmm3, %xmm1, %xmm3 +; XOP-NEXT: vpsubw %xmm6, %xmm3, %xmm3 +; XOP-NEXT: vpminsw %xmm2, %xmm0, %xmm6 +; XOP-NEXT: vpmaxsw %xmm2, %xmm0, %xmm2 ; XOP-NEXT: vpsubw %xmm6, %xmm2, %xmm2 -; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm6 -; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1 -; XOP-NEXT: vpsubw %xmm6, %xmm1, %xmm1 -; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1 ; XOP-NEXT: vpsrlw $1, %xmm2, %xmm2 +; XOP-NEXT: vpsrlw $1, %xmm3, %xmm3 ; XOP-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1] ; XOP-NEXT: vpor %xmm6, %xmm5, %xmm5 ; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4 -; XOP-NEXT: vpmacsww %xmm3, %xmm4, %xmm2, %xmm2 -; XOP-NEXT: vpmacsww %xmm0, %xmm5, %xmm1, %xmm0 -; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; XOP-NEXT: vpmacsww %xmm1, %xmm4, %xmm3, %xmm1 +; XOP-NEXT: vpmacsww %xmm0, %xmm5, %xmm2, %xmm0 +; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; XOP-NEXT: retq ; ; AVX512F-LABEL: vec256_i16_signed_reg_mem: @@ -2425,9 +2425,9 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind { ; AVX1-LABEL: vec256_i8_signed_reg_mem: ; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vmovdqa (%rdi), %xmm2 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm4 ; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm5 ; AVX1-NEXT: vpminsb %xmm2, %xmm0, %xmm6 @@ -2487,38 +2487,38 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind ; ; XOP-LABEL: vec256_i8_signed_reg_mem: ; XOP: # %bb.0: -; XOP-NEXT: vmovdqa (%rdi), %xmm1 -; XOP-NEXT: vmovdqa 16(%rdi), %xmm2 -; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3 -; XOP-NEXT: vpcomgtb %xmm2, %xmm3, %xmm4 -; XOP-NEXT: vpcomgtb 
%xmm1, %xmm0, %xmm5 -; XOP-NEXT: vpminsb %xmm1, %xmm0, %xmm6 -; XOP-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1 -; XOP-NEXT: vpsubb %xmm6, %xmm1, %xmm1 -; XOP-NEXT: vpminsb %xmm2, %xmm3, %xmm6 -; XOP-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2 +; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1 +; XOP-NEXT: vmovdqa (%rdi), %xmm2 +; XOP-NEXT: vmovdqa 16(%rdi), %xmm3 +; XOP-NEXT: vpcomgtb %xmm3, %xmm1, %xmm4 +; XOP-NEXT: vpcomgtb %xmm2, %xmm0, %xmm5 +; XOP-NEXT: vpminsb %xmm2, %xmm0, %xmm6 +; XOP-NEXT: vpmaxsb %xmm2, %xmm0, %xmm2 ; XOP-NEXT: vpsubb %xmm6, %xmm2, %xmm2 +; XOP-NEXT: vpminsb %xmm3, %xmm1, %xmm6 +; XOP-NEXT: vpmaxsb %xmm3, %xmm1, %xmm3 +; XOP-NEXT: vpsubb %xmm6, %xmm3, %xmm3 ; XOP-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6 +; XOP-NEXT: vpshlb %xmm6, %xmm3, %xmm3 ; XOP-NEXT: vpshlb %xmm6, %xmm2, %xmm2 -; XOP-NEXT: vpshlb %xmm6, %xmm1, %xmm1 ; XOP-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; XOP-NEXT: vpor %xmm6, %xmm5, %xmm5 ; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] ; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8 -; XOP-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8 +; XOP-NEXT: vpmaddubsw %xmm8, %xmm2, %xmm8 ; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5 -; XOP-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1 +; XOP-NEXT: vpmaddubsw %xmm5, %xmm2, %xmm2 ; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30] -; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm1, %xmm1 +; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm2, %xmm2 ; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4 ; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6 -; XOP-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm6 +; XOP-NEXT: vpmaddubsw %xmm6, %xmm3, %xmm6 ; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4 -; XOP-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2 -; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm2, %xmm2 -; XOP-NEXT: vpaddb %xmm3, %xmm2, %xmm2 -; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0 -; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; XOP-NEXT: vpmaddubsw %xmm4, %xmm3, %xmm3 +; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm3, %xmm3 +; XOP-NEXT: vpaddb %xmm1, %xmm3, %xmm1 +; XOP-NEXT: vpaddb %xmm0, %xmm2, %xmm0 +; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; XOP-NEXT: retq ; ; AVX512F-LABEL: vec256_i8_signed_reg_mem: diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll index 5f6337e2..a4750b4 100644 --- a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll +++ b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll @@ -507,58 +507,58 @@ define <32 x i16> @vec512_i16_signed_mem_reg(ptr %a1_addr, <32 x i16> %a2) nounw define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, ptr %a2_addr) nounwind { ; AVX512F-LABEL: vec512_i16_signed_reg_mem: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vmovdqa (%rdi), %ymm1 -; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm2 -; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3 -; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4 -; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5 +; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512F-NEXT: vmovdqa (%rdi), %ymm2 +; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3 +; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm4 +; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm5 ; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4 -; AVX512F-NEXT: vpminsw %ymm2, %ymm3, %ymm5 -; AVX512F-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpminsw %ymm3, %ymm1, %ymm5 +; AVX512F-NEXT: vpmaxsw %ymm3, %ymm1, %ymm3 +; AVX512F-NEXT: vpsubw %ymm5, %ymm3, %ymm3 +; AVX512F-NEXT: vpminsw %ymm2, %ymm0, %ymm5 +; AVX512F-NEXT: vpmaxsw %ymm2, %ymm0, %ymm2 ; AVX512F-NEXT: vpsubw %ymm5, %ymm2, %ymm2 -; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm5 -; AVX512F-NEXT: 
vpmaxsw %ymm1, %ymm0, %ymm1 -; AVX512F-NEXT: vpsubw %ymm5, %ymm1, %ymm1 -; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5 +; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3 +; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5 ; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX512F-NEXT: vpsubw %ymm3, %ymm6, %ymm3 ; AVX512F-NEXT: vpsubw %ymm2, %ymm6, %ymm2 -; AVX512F-NEXT: vpsubw %ymm1, %ymm6, %ymm1 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 -; AVX512F-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5)) -; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2 -; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 +; AVX512F-NEXT: vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5)) +; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm3 +; AVX512F-NEXT: vpaddw %ymm1, %ymm3, %ymm1 +; AVX512F-NEXT: vpaddw %ymm0, %ymm2, %ymm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_mem: ; AVX512VL-FALLBACK: # %bb.0: -; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1 -; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm2 -; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3 -; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4 -; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5 +; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2 +; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3 +; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm4 +; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm5 ; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4 -; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm3, %ymm5 -; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2 +; AVX512VL-FALLBACK-NEXT: vpminsw %ymm3, %ymm1, %ymm5 +; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm3, %ymm1, %ymm3 +; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm3, %ymm3 +; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm0, %ymm5 +; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm0, %ymm2 ; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm2, %ymm2 -; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm5 -; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1 -; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm1, %ymm1 -; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2 -; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5 +; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3 +; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5 ; AVX512VL-FALLBACK-NEXT: vpxor %xmm6, %xmm6, %xmm6 +; AVX512VL-FALLBACK-NEXT: vpsubw %ymm3, %ymm6, %ymm3 ; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm6, %ymm2 -; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm6, %ymm1 -; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 -; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5)) -; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 -; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2 -; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 -; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 +; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5)) +; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm3 +; AVX512VL-FALLBACK-NEXT: vpaddw %ymm1, %ymm3, %ymm1 
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 +; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX512VL-FALLBACK-NEXT: retq ; ; AVX512BW-LABEL: vec512_i16_signed_reg_mem: @@ -939,66 +939,66 @@ define <64 x i8> @vec512_i8_signed_mem_reg(ptr %a1_addr, <64 x i8> %a2) nounwind define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, ptr %a2_addr) nounwind { ; AVX512F-LABEL: vec512_i8_signed_reg_mem: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vmovdqa (%rdi), %ymm1 -; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm2 -; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3 -; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4 -; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5 +; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512F-NEXT: vmovdqa (%rdi), %ymm2 +; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3 +; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4 +; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5 ; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4 -; AVX512F-NEXT: vpminsb %ymm2, %ymm3, %ymm5 -; AVX512F-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpminsb %ymm3, %ymm1, %ymm5 +; AVX512F-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3 +; AVX512F-NEXT: vpsubb %ymm5, %ymm3, %ymm3 +; AVX512F-NEXT: vpminsb %ymm2, %ymm0, %ymm5 +; AVX512F-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2 ; AVX512F-NEXT: vpsubb %ymm5, %ymm2, %ymm2 -; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm5 -; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1 -; AVX512F-NEXT: vpsubb %ymm5, %ymm1, %ymm1 -; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5 +; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3 +; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5 ; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX512F-NEXT: vpandq %zmm6, %zmm5, %zmm5 -; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512F-NEXT: vpxor %xmm7, %xmm7, %xmm7 +; AVX512F-NEXT: vpsubb %ymm3, %ymm7, %ymm3 +; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2 ; AVX512F-NEXT: vpsubb %ymm2, %ymm7, %ymm2 -; AVX512F-NEXT: vpand %ymm6, %ymm1, %ymm1 -; AVX512F-NEXT: vpsubb %ymm1, %ymm7, %ymm1 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 -; AVX512F-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5)) -; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2 -; AVX512F-NEXT: vpaddb %ymm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0 -; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 +; AVX512F-NEXT: vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5)) +; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm3 +; AVX512F-NEXT: vpaddb %ymm1, %ymm3, %ymm1 +; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_mem: ; AVX512VL-FALLBACK: # %bb.0: -; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1 -; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm2 -; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3 -; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4 -; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5 +; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2 +; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3 +; AVX512VL-FALLBACK-NEXT: 
vpcmpgtb %ymm3, %ymm1, %ymm4 +; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5 ; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4 -; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm3, %ymm5 -; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2 +; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm1, %ymm5 +; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3 +; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm3, %ymm3 +; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm0, %ymm5 +; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2 ; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm2, %ymm2 -; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm5 -; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1 -; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm1, %ymm1 -; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2 -; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5 +; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3 +; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5 ; AVX512VL-FALLBACK-NEXT: vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX512VL-FALLBACK-NEXT: vpandq %zmm6, %zmm5, %zmm5 -; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512VL-FALLBACK-NEXT: vpxor %xmm7, %xmm7, %xmm7 +; AVX512VL-FALLBACK-NEXT: vpsubb %ymm3, %ymm7, %ymm3 +; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2 ; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm7, %ymm2 -; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm1, %ymm1 -; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm7, %ymm1 -; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 -; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5)) -; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 -; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm2, %ymm2 -; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0 -; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 +; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5)) +; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm3 +; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm3, %ymm1 +; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm2, %ymm0 +; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX512VL-FALLBACK-NEXT: retq ; ; AVX512BW-LABEL: vec512_i8_signed_reg_mem: diff --git a/llvm/test/CodeGen/X86/midpoint-int.ll b/llvm/test/CodeGen/X86/midpoint-int.ll index 1921cf38..a75d42e 100644 --- a/llvm/test/CodeGen/X86/midpoint-int.ll +++ b/llvm/test/CodeGen/X86/midpoint-int.ll @@ -28,24 +28,27 @@ define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind { ; ; X86-LABEL: scalar_i32_signed_reg_reg: ; X86: # %bb.0: +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %esi, %ecx -; X86-NEXT: setle %al -; X86-NEXT: leal -1(%eax,%eax), %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl %ecx, %edx +; X86-NEXT: subl %esi, %edx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: subl %esi, %eax +; X86-NEXT: setle %bl +; X86-NEXT: leal -1(%ebx,%ebx), %esi ; X86-NEXT: jg .LBB0_2 ; X86-NEXT: # %bb.1: -; 
X86-NEXT: subl %ecx, %esi -; X86-NEXT: movl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: movl %edx, %eax ; X86-NEXT: .LBB0_2: ; X86-NEXT: shrl %eax -; X86-NEXT: imull %edx, %eax +; X86-NEXT: imull %esi, %eax ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: popl %ebx ; X86-NEXT: retl %t3 = icmp sgt i32 %a1, %a2 ; signed %t4 = select i1 %t3, i32 -1, i32 1 @@ -76,26 +79,27 @@ define i32 @scalar_i32_unsigned_reg_reg(i32 %a1, i32 %a2) nounwind { ; ; X86-LABEL: scalar_i32_unsigned_reg_reg: ; X86: # %bb.0: -; X86-NEXT: pushl %edi +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: movl %edi, %esi -; X86-NEXT: subl %ecx, %esi -; X86-NEXT: setae %al -; X86-NEXT: leal -1(%eax,%eax), %edx +; X86-NEXT: movl %ecx, %edx +; X86-NEXT: subl %esi, %edx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax -; X86-NEXT: subl %edi, %eax +; X86-NEXT: subl %esi, %eax +; X86-NEXT: setbe %bl +; X86-NEXT: leal -1(%ebx,%ebx), %esi ; X86-NEXT: ja .LBB1_2 ; X86-NEXT: # %bb.1: -; X86-NEXT: movl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: movl %edx, %eax ; X86-NEXT: .LBB1_2: ; X86-NEXT: shrl %eax -; X86-NEXT: imull %edx, %eax +; X86-NEXT: imull %esi, %eax ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: popl %edi +; X86-NEXT: popl %ebx ; X86-NEXT: retl %t3 = icmp ugt i32 %a1, %a2 %t4 = select i1 %t3, i32 -1, i32 1 @@ -128,25 +132,28 @@ define i32 @scalar_i32_signed_mem_reg(ptr %a1_addr, i32 %a2) nounwind { ; ; X86-LABEL: scalar_i32_signed_mem_reg: ; X86: # %bb.0: +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl (%eax), %ecx -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %edx, %ecx -; X86-NEXT: setle %al -; X86-NEXT: leal -1(%eax,%eax), %esi +; X86-NEXT: movl %ecx, %edx +; X86-NEXT: subl %esi, %edx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax -; X86-NEXT: subl %edx, %eax +; X86-NEXT: subl %esi, %eax +; X86-NEXT: setle %bl +; X86-NEXT: leal -1(%ebx,%ebx), %esi ; X86-NEXT: jg .LBB2_2 ; X86-NEXT: # %bb.1: -; X86-NEXT: subl %ecx, %edx +; X86-NEXT: negl %edx ; X86-NEXT: movl %edx, %eax ; X86-NEXT: .LBB2_2: ; X86-NEXT: shrl %eax ; X86-NEXT: imull %esi, %eax ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: popl %ebx ; X86-NEXT: retl %a1 = load i32, ptr %a1_addr %t3 = icmp sgt i32 %a1, %a2 ; signed @@ -178,25 +185,28 @@ define i32 @scalar_i32_signed_reg_mem(i32 %a1, ptr %a2_addr) nounwind { ; ; X86-LABEL: scalar_i32_signed_reg_mem: ; X86: # %bb.0: +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl (%eax), %esi -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %esi, %ecx -; X86-NEXT: setle %al -; X86-NEXT: leal -1(%eax,%eax), %edx +; X86-NEXT: movl %ecx, %edx +; X86-NEXT: subl %esi, %edx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: subl %esi, %eax +; X86-NEXT: setle %bl +; X86-NEXT: leal -1(%ebx,%ebx), %esi ; X86-NEXT: jg .LBB3_2 ; X86-NEXT: # %bb.1: -; X86-NEXT: subl %ecx, %esi -; X86-NEXT: movl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: movl %edx, %eax ; X86-NEXT: .LBB3_2: ; X86-NEXT: shrl %eax -; X86-NEXT: imull %edx, %eax +; X86-NEXT: imull %esi, %eax ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: popl %ebx ; X86-NEXT: retl %a2 = load i32, ptr 
%a2_addr %t3 = icmp sgt i32 %a1, %a2 ; signed @@ -229,26 +239,29 @@ define i32 @scalar_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { ; ; X86-LABEL: scalar_i32_signed_mem_mem: ; X86: # %bb.0: +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl (%ecx), %ecx ; X86-NEXT: movl (%eax), %esi -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: cmpl %esi, %ecx -; X86-NEXT: setle %al -; X86-NEXT: leal -1(%eax,%eax), %edx +; X86-NEXT: movl %ecx, %edx +; X86-NEXT: subl %esi, %edx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: subl %esi, %eax +; X86-NEXT: setle %bl +; X86-NEXT: leal -1(%ebx,%ebx), %esi ; X86-NEXT: jg .LBB4_2 ; X86-NEXT: # %bb.1: -; X86-NEXT: subl %ecx, %esi -; X86-NEXT: movl %esi, %eax +; X86-NEXT: negl %edx +; X86-NEXT: movl %edx, %eax ; X86-NEXT: .LBB4_2: ; X86-NEXT: shrl %eax -; X86-NEXT: imull %edx, %eax +; X86-NEXT: imull %esi, %eax ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi +; X86-NEXT: popl %ebx ; X86-NEXT: retl %a1 = load i32, ptr %a1_addr %a2 = load i32, ptr %a2_addr @@ -291,36 +304,34 @@ define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind { ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %esi, %edx -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %eax -; X86-NEXT: setl %al -; X86-NEXT: movzbl %al, %edi -; X86-NEXT: negl %edi -; X86-NEXT: movl %edi, %ebx -; X86-NEXT: orl $1, %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl %esi, %eax ; X86-NEXT: subl %edx, %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp -; X86-NEXT: sbbl %ecx, %ebp +; X86-NEXT: movl %ecx, %edi +; X86-NEXT: sbbl %ebp, %edi ; X86-NEXT: subl %esi, %edx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: sbbl %ecx, %ebp +; X86-NEXT: setl %bl +; X86-NEXT: movzbl %bl, %ebx ; X86-NEXT: jl .LBB5_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: movl %edx, %eax -; X86-NEXT: movl %ecx, %ebp +; X86-NEXT: movl %ebp, %edi ; X86-NEXT: .LBB5_2: -; X86-NEXT: shrdl $1, %ebp, %eax -; X86-NEXT: shrl %ebp -; X86-NEXT: imull %eax, %edi -; X86-NEXT: mull %ebx -; X86-NEXT: addl %edi, %edx +; X86-NEXT: negl %ebx +; X86-NEXT: shrdl $1, %edi, %eax +; X86-NEXT: shrl %edi +; X86-NEXT: movl %eax, %ebp ; X86-NEXT: imull %ebx, %ebp +; X86-NEXT: orl $1, %ebx +; X86-NEXT: mull %ebx ; X86-NEXT: addl %ebp, %edx -; X86-NEXT: addl {{[0-9]+}}(%esp), %eax -; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X86-NEXT: imull %edi, %ebx +; X86-NEXT: addl %ebx, %edx +; X86-NEXT: addl %esi, %eax +; X86-NEXT: adcl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -359,10 +370,10 @@ define i64 @scalar_i64_unsigned_reg_reg(i64 %a1, i64 %a2) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: cmpl %eax, %ebp ; X86-NEXT: sbbl %ecx, %esi @@ -429,45 +440,36 @@ define i64 @scalar_i64_signed_mem_reg(ptr %a1_addr, i64 %a2) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: subl $12, %esp ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; 
X86-NEXT: movl {{[0-9]+}}(%esp), %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl (%eax), %ebx -; X86-NEXT: movl 4(%eax), %esi -; X86-NEXT: cmpl %ebx, %edx -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: sbbl %esi, %eax -; X86-NEXT: setl %al -; X86-NEXT: movzbl %al, %edi -; X86-NEXT: negl %edi -; X86-NEXT: movl %edi, %eax -; X86-NEXT: orl $1, %eax -; X86-NEXT: movl %eax, (%esp) # 4-byte Spill -; X86-NEXT: movl %ebx, %eax +; X86-NEXT: movl (%eax), %esi +; X86-NEXT: movl 4(%eax), %ecx +; X86-NEXT: movl %esi, %eax ; X86-NEXT: subl %edx, %eax -; X86-NEXT: movl %esi, %ebp +; X86-NEXT: movl %ecx, %edi +; X86-NEXT: sbbl %ebp, %edi +; X86-NEXT: subl %esi, %edx ; X86-NEXT: sbbl %ecx, %ebp -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: subl %ebx, %edx -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: sbbl %esi, %ecx +; X86-NEXT: setl %bl +; X86-NEXT: movzbl %bl, %ebx ; X86-NEXT: jl .LBB7_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: movl %edx, %eax -; X86-NEXT: movl %ecx, %ebp +; X86-NEXT: movl %ebp, %edi ; X86-NEXT: .LBB7_2: -; X86-NEXT: shrdl $1, %ebp, %eax -; X86-NEXT: shrl %ebp -; X86-NEXT: imull %eax, %edi -; X86-NEXT: movl (%esp), %ecx # 4-byte Reload -; X86-NEXT: mull %ecx -; X86-NEXT: addl %edi, %edx -; X86-NEXT: imull %ecx, %ebp +; X86-NEXT: negl %ebx +; X86-NEXT: shrdl $1, %edi, %eax +; X86-NEXT: shrl %edi +; X86-NEXT: movl %eax, %ebp +; X86-NEXT: imull %ebx, %ebp +; X86-NEXT: orl $1, %ebx +; X86-NEXT: mull %ebx ; X86-NEXT: addl %ebp, %edx -; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: addl $12, %esp +; X86-NEXT: imull %edi, %ebx +; X86-NEXT: addl %ebx, %edx +; X86-NEXT: addl %esi, %eax +; X86-NEXT: adcl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -508,37 +510,35 @@ define i64 @scalar_i64_signed_reg_mem(i64 %a1, ptr %a2_addr) nounwind { ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl (%eax), %edx -; X86-NEXT: movl 4(%eax), %ecx -; X86-NEXT: cmpl %esi, %edx -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %eax -; X86-NEXT: setl %al -; X86-NEXT: movzbl %al, %edi -; X86-NEXT: negl %edi -; X86-NEXT: movl %edi, %ebx -; X86-NEXT: orl $1, %ebx +; X86-NEXT: movl 4(%eax), %ebp ; X86-NEXT: movl %esi, %eax ; X86-NEXT: subl %edx, %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp -; X86-NEXT: sbbl %ecx, %ebp +; X86-NEXT: movl %ecx, %edi +; X86-NEXT: sbbl %ebp, %edi ; X86-NEXT: subl %esi, %edx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: sbbl %ecx, %ebp +; X86-NEXT: setl %bl +; X86-NEXT: movzbl %bl, %ebx ; X86-NEXT: jl .LBB8_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: movl %edx, %eax -; X86-NEXT: movl %ecx, %ebp +; X86-NEXT: movl %ebp, %edi ; X86-NEXT: .LBB8_2: -; X86-NEXT: shrdl $1, %ebp, %eax -; X86-NEXT: shrl %ebp -; X86-NEXT: imull %eax, %edi -; X86-NEXT: mull %ebx -; X86-NEXT: addl %edi, %edx +; X86-NEXT: negl %ebx +; X86-NEXT: shrdl $1, %edi, %eax +; X86-NEXT: shrl %edi +; X86-NEXT: movl %eax, %ebp ; X86-NEXT: imull %ebx, %ebp +; X86-NEXT: orl $1, %ebx +; X86-NEXT: mull %ebx ; X86-NEXT: addl %ebp, %edx -; X86-NEXT: addl {{[0-9]+}}(%esp), %eax -; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx +; X86-NEXT: imull %edi, %ebx +; X86-NEXT: addl %ebx, %edx +; X86-NEXT: addl %esi, %eax +; X86-NEXT: adcl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: 
popl %ebx @@ -579,46 +579,37 @@ define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: subl $12, %esp ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl (%ecx), %ebx -; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: movl (%ecx), %esi +; X86-NEXT: movl 4(%ecx), %ecx ; X86-NEXT: movl (%eax), %edx -; X86-NEXT: movl 4(%eax), %ecx -; X86-NEXT: cmpl %ebx, %edx -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: sbbl %esi, %eax -; X86-NEXT: setl %al -; X86-NEXT: movzbl %al, %edi -; X86-NEXT: negl %edi -; X86-NEXT: movl %edi, %eax -; X86-NEXT: orl $1, %eax -; X86-NEXT: movl %eax, (%esp) # 4-byte Spill -; X86-NEXT: movl %ebx, %eax +; X86-NEXT: movl 4(%eax), %ebp +; X86-NEXT: movl %esi, %eax ; X86-NEXT: subl %edx, %eax -; X86-NEXT: movl %esi, %ebp +; X86-NEXT: movl %ecx, %edi +; X86-NEXT: sbbl %ebp, %edi +; X86-NEXT: subl %esi, %edx ; X86-NEXT: sbbl %ecx, %ebp -; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: subl %ebx, %edx -; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: sbbl %esi, %ecx +; X86-NEXT: setl %bl +; X86-NEXT: movzbl %bl, %ebx ; X86-NEXT: jl .LBB9_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: movl %edx, %eax -; X86-NEXT: movl %ecx, %ebp +; X86-NEXT: movl %ebp, %edi ; X86-NEXT: .LBB9_2: -; X86-NEXT: shrdl $1, %ebp, %eax -; X86-NEXT: shrl %ebp -; X86-NEXT: imull %eax, %edi -; X86-NEXT: movl (%esp), %ecx # 4-byte Reload -; X86-NEXT: mull %ecx -; X86-NEXT: addl %edi, %edx -; X86-NEXT: imull %ecx, %ebp +; X86-NEXT: negl %ebx +; X86-NEXT: shrdl $1, %edi, %eax +; X86-NEXT: shrl %edi +; X86-NEXT: movl %eax, %ebp +; X86-NEXT: imull %ebx, %ebp +; X86-NEXT: orl $1, %ebx +; X86-NEXT: mull %ebx ; X86-NEXT: addl %ebp, %edx -; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload -; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload -; X86-NEXT: addl $12, %esp +; X86-NEXT: imull %edi, %ebx +; X86-NEXT: addl %ebx, %edx +; X86-NEXT: addl %esi, %eax +; X86-NEXT: adcl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -667,17 +658,16 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind { ; X86: # %bb.0: ; X86-NEXT: pushl %ebx ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: subw %dx, %ax +; X86-NEXT: setle %bl +; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: jg .LBB10_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: negl %eax ; X86-NEXT: .LBB10_2: -; X86-NEXT: xorl %ebx, %ebx -; X86-NEXT: cmpw %dx, %cx -; X86-NEXT: setle %bl -; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -720,17 +710,16 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind { ; X86: # %bb.0: ; X86-NEXT: pushl %ebx ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: subw %dx, %ax +; X86-NEXT: setbe %bl +; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: ja .LBB11_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: negl %eax ; X86-NEXT: .LBB11_2: -; X86-NEXT: xorl %ebx, %ebx -; X86-NEXT: cmpw %cx, %dx -; X86-NEXT: setae %bl -; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ 
-777,16 +766,15 @@ define i16 @scalar_i16_signed_mem_reg(ptr %a1_addr, i16 %a2) nounwind { ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movzwl (%eax), %ecx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: subw %dx, %ax +; X86-NEXT: setle %bl +; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: jg .LBB12_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: negl %eax ; X86-NEXT: .LBB12_2: -; X86-NEXT: xorl %ebx, %ebx -; X86-NEXT: cmpw %dx, %cx -; X86-NEXT: setle %bl -; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -829,19 +817,18 @@ define i16 @scalar_i16_signed_reg_mem(i16 %a1, ptr %a2_addr) nounwind { ; X86-LABEL: scalar_i16_signed_reg_mem: ; X86: # %bb.0: ; X86-NEXT: pushl %ebx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movzwl (%eax), %edx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: subw %dx, %ax +; X86-NEXT: setle %bl +; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: jg .LBB13_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: negl %eax ; X86-NEXT: .LBB13_2: -; X86-NEXT: xorl %ebx, %ebx -; X86-NEXT: cmpw %dx, %cx -; X86-NEXT: setle %bl -; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -888,16 +875,15 @@ define i16 @scalar_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movzwl (%ecx), %ecx ; X86-NEXT: movzwl (%eax), %edx +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: subw %dx, %ax +; X86-NEXT: setle %bl +; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: jg .LBB14_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: negl %eax ; X86-NEXT: .LBB14_2: -; X86-NEXT: xorl %ebx, %ebx -; X86-NEXT: cmpw %dx, %cx -; X86-NEXT: setle %bl -; X86-NEXT: leal -1(%ebx,%ebx), %edx ; X86-NEXT: movzwl %ax, %eax ; X86-NEXT: shrl %eax ; X86-NEXT: imull %edx, %eax @@ -946,17 +932,16 @@ define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind { ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movb {{[0-9]+}}(%esp), %ah -; X86-NEXT: cmpb %ah, %cl -; X86-NEXT: setg %dl -; X86-NEXT: negb %dl -; X86-NEXT: orb $1, %dl ; X86-NEXT: movb %cl, %al ; X86-NEXT: subb %ah, %al +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB15_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: subb %cl, %ah ; X86-NEXT: movb %ah, %al ; X86-NEXT: .LBB15_2: +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al @@ -993,18 +978,17 @@ define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind { ; X86-LABEL: scalar_i8_unsigned_reg_reg: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movb {{[0-9]+}}(%esp), %ch -; X86-NEXT: xorl %edx, %edx -; X86-NEXT: movb %ch, %ah -; X86-NEXT: subb %cl, %ah -; X86-NEXT: sbbl %edx, %edx -; X86-NEXT: orb $1, %dl +; X86-NEXT: movb {{[0-9]+}}(%esp), %ah ; X86-NEXT: movb %cl, %al -; X86-NEXT: subb %ch, %al +; X86-NEXT: subb %ah, %al +; X86-NEXT: seta %dl ; X86-NEXT: ja .LBB16_2 ; X86-NEXT: # %bb.1: +; X86-NEXT: subb %cl, %ah ; X86-NEXT: movb %ah, %al ; X86-NEXT: .LBB16_2: +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al @@ -1046,17 +1030,16 @@ define i8 @scalar_i8_signed_mem_reg(ptr %a1_addr, i8 %a2) nounwind { ; X86-NEXT: movb {{[0-9]+}}(%esp), %ah ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movzbl (%ecx), 
%ecx -; X86-NEXT: cmpb %ah, %cl -; X86-NEXT: setg %dl -; X86-NEXT: negb %dl -; X86-NEXT: orb $1, %dl ; X86-NEXT: movb %cl, %al ; X86-NEXT: subb %ah, %al +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB17_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: subb %cl, %ah ; X86-NEXT: movb %ah, %al ; X86-NEXT: .LBB17_2: +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al @@ -1096,17 +1079,16 @@ define i8 @scalar_i8_signed_reg_mem(i8 %a1, ptr %a2_addr) nounwind { ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movb (%eax), %ah -; X86-NEXT: cmpb %ah, %cl -; X86-NEXT: setg %dl -; X86-NEXT: negb %dl -; X86-NEXT: orb $1, %dl ; X86-NEXT: movb %cl, %al ; X86-NEXT: subb %ah, %al +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB18_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: subb %cl, %ah ; X86-NEXT: movb %ah, %al ; X86-NEXT: .LBB18_2: +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al @@ -1148,17 +1130,16 @@ define i8 @scalar_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movzbl (%ecx), %ecx ; X86-NEXT: movb (%eax), %ah -; X86-NEXT: cmpb %ah, %cl -; X86-NEXT: setg %dl -; X86-NEXT: negb %dl -; X86-NEXT: orb $1, %dl ; X86-NEXT: movb %cl, %al ; X86-NEXT: subb %ah, %al +; X86-NEXT: setg %dl ; X86-NEXT: jg .LBB19_2 ; X86-NEXT: # %bb.1: ; X86-NEXT: subb %cl, %ah ; X86-NEXT: movb %ah, %al ; X86-NEXT: .LBB19_2: +; X86-NEXT: negb %dl +; X86-NEXT: orb $1, %dl ; X86-NEXT: shrb %al ; X86-NEXT: mulb %dl ; X86-NEXT: addb %cl, %al diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll index a1da40e7..f539830 100644 --- a/llvm/test/CodeGen/X86/oddsubvector.ll +++ b/llvm/test/CodeGen/X86/oddsubvector.ll @@ -155,10 +155,10 @@ define <16 x i32> @PR42819(ptr %a0) { define void @PR42833() { ; SSE2-LABEL: PR42833: ; SSE2: # %bb.0: -; SSE2-NEXT: movl b(%rip), %eax -; SSE2-NEXT: movdqa c+128(%rip), %xmm0 ; SSE2-NEXT: movdqa c+144(%rip), %xmm2 -; SSE2-NEXT: addl c+128(%rip), %eax +; SSE2-NEXT: movdqa c+128(%rip), %xmm0 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: addl b(%rip), %eax ; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: paddd %xmm0, %xmm3 @@ -191,10 +191,10 @@ define void @PR42833() { ; ; SSE42-LABEL: PR42833: ; SSE42: # %bb.0: -; SSE42-NEXT: movl b(%rip), %eax -; SSE42-NEXT: movdqa c+128(%rip), %xmm0 ; SSE42-NEXT: movdqa c+144(%rip), %xmm1 -; SSE42-NEXT: addl c+128(%rip), %eax +; SSE42-NEXT: movdqa c+128(%rip), %xmm0 +; SSE42-NEXT: movd %xmm0, %eax +; SSE42-NEXT: addl b(%rip), %eax ; SSE42-NEXT: movd %eax, %xmm2 ; SSE42-NEXT: paddd %xmm0, %xmm2 ; SSE42-NEXT: movdqa d+144(%rip), %xmm3 diff --git a/llvm/test/CodeGen/X86/pmulh.ll b/llvm/test/CodeGen/X86/pmulh.ll index 300da68..ead7110 100644 --- a/llvm/test/CodeGen/X86/pmulh.ll +++ b/llvm/test/CodeGen/X86/pmulh.ll @@ -2166,3 +2166,708 @@ define <8 x i16> @sse2_pmulhu_w_const(<8 x i16> %a0, <8 x i16> %a1) { } declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) +define <8 x i16> @zext_mul_and_shift17(<8 x i16> %a, <8 x i16> %b) { +; SSE-LABEL: zext_mul_and_shift17: +; SSE: # %bb.0: +; SSE-NEXT: pmulhuw %xmm1, %xmm0 +; SSE-NEXT: psrlw $1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: zext_mul_and_shift17: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 +; AVX-NEXT: retq + %a.ext = zext <8 x i16> %a to <8 x i32> + %b.ext = zext <8 x i16> %b to <8 x i32> + %mul = mul <8 
x i32> %a.ext, %b.ext + %shift = lshr <8 x i32> %mul, splat(i32 17) + %trunc = trunc <8 x i32> %shift to <8 x i16> + ret <8 x i16> %trunc +} + +define <8 x i16> @zext_mul_and_shift24(<8 x i16> %a, <8 x i16> %b) { +; SSE-LABEL: zext_mul_and_shift24: +; SSE: # %bb.0: +; SSE-NEXT: pmulhuw %xmm1, %xmm0 +; SSE-NEXT: psrlw $8, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: zext_mul_and_shift24: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX-NEXT: retq + %a.ext = zext <8 x i16> %a to <8 x i32> + %b.ext = zext <8 x i16> %b to <8 x i32> + %mul = mul <8 x i32> %a.ext, %b.ext + %shift = lshr <8 x i32> %mul, splat(i32 24) + %trunc = trunc <8 x i32> %shift to <8 x i16> + ret <8 x i16> %trunc +} + +define <8 x i16> @zext_mul_and_shift31(<8 x i16> %a, <8 x i16> %b) { +; SSE-LABEL: zext_mul_and_shift31: +; SSE: # %bb.0: +; SSE-NEXT: pmulhuw %xmm1, %xmm0 +; SSE-NEXT: psrlw $15, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: zext_mul_and_shift31: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX-NEXT: retq + %a.ext = zext <8 x i16> %a to <8 x i32> + %b.ext = zext <8 x i16> %b to <8 x i32> + %mul = mul <8 x i32> %a.ext, %b.ext + %shift = lshr <8 x i32> %mul, splat(i32 31) + %trunc = trunc <8 x i32> %shift to <8 x i16> + ret <8 x i16> %trunc +} + +define <8 x i16> @sext_mul_and_shift17(<8 x i16> %a, <8 x i16> %b) { +; SSE-LABEL: sext_mul_and_shift17: +; SSE: # %bb.0: +; SSE-NEXT: pmulhw %xmm1, %xmm0 +; SSE-NEXT: psrlw $1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sext_mul_and_shift17: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 +; AVX-NEXT: retq + %a.ext = sext <8 x i16> %a to <8 x i32> + %b.ext = sext <8 x i16> %b to <8 x i32> + %mul = mul <8 x i32> %a.ext, %b.ext + %shift = lshr <8 x i32> %mul, splat(i32 17) + %trunc = trunc <8 x i32> %shift to <8 x i16> + ret <8 x i16> %trunc +} + +define <8 x i16> @sext_mul_and_shift24(<8 x i16> %a, <8 x i16> %b) { +; SSE-LABEL: sext_mul_and_shift24: +; SSE: # %bb.0: +; SSE-NEXT: pmulhw %xmm1, %xmm0 +; SSE-NEXT: psrlw $8, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sext_mul_and_shift24: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX-NEXT: retq + %a.ext = sext <8 x i16> %a to <8 x i32> + %b.ext = sext <8 x i16> %b to <8 x i32> + %mul = mul <8 x i32> %a.ext, %b.ext + %shift = lshr <8 x i32> %mul, splat(i32 24) + %trunc = trunc <8 x i32> %shift to <8 x i16> + ret <8 x i16> %trunc +} + +define <8 x i16> @sext_mul_and_shift31(<8 x i16> %a, <8 x i16> %b) { +; SSE-LABEL: sext_mul_and_shift31: +; SSE: # %bb.0: +; SSE-NEXT: pmulhw %xmm1, %xmm0 +; SSE-NEXT: psrlw $15, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sext_mul_and_shift31: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX-NEXT: retq + %a.ext = sext <8 x i16> %a to <8 x i32> + %b.ext = sext <8 x i16> %b to <8 x i32> + %mul = mul <8 x i32> %a.ext, %b.ext + %shift = lshr <8 x i32> %mul, splat(i32 31) + %trunc = trunc <8 x i32> %shift to <8 x i16> + ret <8 x i16> %trunc +} + +define <4 x i16> @sext_mulhw_v4i16_shift17(<4 x i16> %a, <4 x i16> %b) { +; SSE-LABEL: sext_mulhw_v4i16_shift17: +; SSE: # %bb.0: +; SSE-NEXT: pmulhw %xmm1, %xmm0 +; SSE-NEXT: psrlw $1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sext_mulhw_v4i16_shift17: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 +; AVX-NEXT: retq + %a1 = sext <4 x i16> %a to <4 x i32> + %b1 = 
sext <4 x i16> %b to <4 x i32> + %c = mul <4 x i32> %a1, %b1 + %d = lshr <4 x i32> %c, splat (i32 17) + %e = trunc <4 x i32> %d to <4 x i16> + ret <4 x i16> %e +} + +define <4 x i16> @sext_mulhw_v4i16_shift24(<4 x i16> %a, <4 x i16> %b) { +; SSE-LABEL: sext_mulhw_v4i16_shift24: +; SSE: # %bb.0: +; SSE-NEXT: pmulhw %xmm1, %xmm0 +; SSE-NEXT: psrlw $8, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sext_mulhw_v4i16_shift24: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX-NEXT: retq + %a1 = sext <4 x i16> %a to <4 x i32> + %b1 = sext <4 x i16> %b to <4 x i32> + %c = mul <4 x i32> %a1, %b1 + %d = lshr <4 x i32> %c, splat (i32 24) + %e = trunc <4 x i32> %d to <4 x i16> + ret <4 x i16> %e +} + +define <4 x i16> @sext_mulhw_v4i16_shift31(<4 x i16> %a, <4 x i16> %b) { +; SSE-LABEL: sext_mulhw_v4i16_shift31: +; SSE: # %bb.0: +; SSE-NEXT: pmulhw %xmm1, %xmm0 +; SSE-NEXT: psrlw $15, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: sext_mulhw_v4i16_shift31: +; AVX: # %bb.0: +; AVX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX-NEXT: retq + %a1 = sext <4 x i16> %a to <4 x i32> + %b1 = sext <4 x i16> %b to <4 x i32> + %c = mul <4 x i32> %a1, %b1 + %d = lshr <4 x i32> %c, splat (i32 31) + %e = trunc <4 x i32> %d to <4 x i16> + ret <4 x i16> %e +} + +define <4 x i16> @and_mulhuw_v4i16_shift17(<4 x i64> %a, <4 x i64> %b) { +; SSE2-LABEL: and_mulhuw_v4i16_shift17: +; SSE2: # %bb.0: +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2] +; SSE2-NEXT: pslld $16, %xmm2 +; SSE2-NEXT: psrad $16, %xmm2 +; SSE2-NEXT: xorps %xmm3, %xmm3 +; SSE2-NEXT: packssdw %xmm3, %xmm2 +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] +; SSE2-NEXT: pslld $16, %xmm0 +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: packssdw %xmm3, %xmm0 +; SSE2-NEXT: pmulhuw %xmm2, %xmm0 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: and_mulhuw_v4i16_shift17: +; SSE41: # %bb.0: +; SSE41-NEXT: pxor %xmm4, %xmm4 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7] +; SSE41-NEXT: packusdw %xmm3, %xmm2 +; SSE41-NEXT: packusdw %xmm4, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7] +; SSE41-NEXT: packusdw %xmm1, %xmm0 +; SSE41-NEXT: packusdw %xmm4, %xmm0 +; SSE41-NEXT: pmulhuw %xmm2, %xmm0 +; SSE41-NEXT: psrlw $1, %xmm0 +; SSE41-NEXT: retq +; +; AVX2-LABEL: and_mulhuw_v4i16_shift17: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: and_mulhuw_v4i16_shift17: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpmovqw %zmm0, %xmm0 +; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %a1 = and <4 x i64> %a, <i64 65535, i64 65535, i64 65535, i64 65535> + %b1 = and <4 x i64> %b, <i64 65535, i64 65535, i64 65535, i64 65535> + %c = mul <4 x i64> %a1, %b1 + %d = lshr <4 x i64> %c, splat (i64 17) + %e = trunc <4 x i64> %d to <4 x i16> + ret <4 x i16> %e +} + +define <4 x i16> 
@and_mulhuw_v4i16_shift24(<4 x i64> %a, <4 x i64> %b) { +; SSE2-LABEL: and_mulhuw_v4i16_shift24: +; SSE2: # %bb.0: +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2] +; SSE2-NEXT: pslld $16, %xmm2 +; SSE2-NEXT: psrad $16, %xmm2 +; SSE2-NEXT: xorps %xmm3, %xmm3 +; SSE2-NEXT: packssdw %xmm3, %xmm2 +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] +; SSE2-NEXT: pslld $16, %xmm0 +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: packssdw %xmm3, %xmm0 +; SSE2-NEXT: pmulhuw %xmm2, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: and_mulhuw_v4i16_shift24: +; SSE41: # %bb.0: +; SSE41-NEXT: pxor %xmm4, %xmm4 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7] +; SSE41-NEXT: packusdw %xmm3, %xmm2 +; SSE41-NEXT: packusdw %xmm4, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7] +; SSE41-NEXT: packusdw %xmm1, %xmm0 +; SSE41-NEXT: packusdw %xmm4, %xmm0 +; SSE41-NEXT: pmulhuw %xmm2, %xmm0 +; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: retq +; +; AVX2-LABEL: and_mulhuw_v4i16_shift24: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: and_mulhuw_v4i16_shift24: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpmovqw %zmm0, %xmm0 +; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %a1 = and <4 x i64> %a, <i64 65535, i64 65535, i64 65535, i64 65535> + %b1 = and <4 x i64> %b, <i64 65535, i64 65535, i64 65535, i64 65535> + %c = mul <4 x i64> %a1, %b1 + %d = lshr <4 x i64> %c, splat (i64 24) + %e = trunc <4 x i64> %d to <4 x i16> + ret <4 x i16> %e +} + +define <4 x i16> @and_mulhuw_v4i16_shift31(<4 x i64> %a, <4 x i64> %b) { +; SSE2-LABEL: and_mulhuw_v4i16_shift31: +; SSE2: # %bb.0: +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2] +; SSE2-NEXT: pslld $16, %xmm2 +; SSE2-NEXT: psrad $16, %xmm2 +; SSE2-NEXT: xorps %xmm3, %xmm3 +; SSE2-NEXT: packssdw %xmm3, %xmm2 +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] +; SSE2-NEXT: pslld $16, %xmm0 +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: packssdw %xmm3, %xmm0 +; SSE2-NEXT: pmulhuw %xmm2, %xmm0 +; SSE2-NEXT: psrlw $15, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: and_mulhuw_v4i16_shift31: +; SSE41: # %bb.0: +; SSE41-NEXT: pxor %xmm4, %xmm4 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7] +; SSE41-NEXT: packusdw %xmm3, %xmm2 +; SSE41-NEXT: packusdw %xmm4, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7] +; SSE41-NEXT: packusdw %xmm1, %xmm0 +; SSE41-NEXT: packusdw %xmm4, %xmm0 +; SSE41-NEXT: pmulhuw %xmm2, %xmm0 +; SSE41-NEXT: psrlw $15, %xmm0 +; SSE41-NEXT: retq +; +; AVX2-LABEL: and_mulhuw_v4i16_shift31: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: 
vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: and_mulhuw_v4i16_shift31: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpmovqw %zmm0, %xmm0 +; AVX512-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %a1 = and <4 x i64> %a, <i64 65535, i64 65535, i64 65535, i64 65535> + %b1 = and <4 x i64> %b, <i64 65535, i64 65535, i64 65535, i64 65535> + %c = mul <4 x i64> %a1, %b1 + %d = lshr <4 x i64> %c, splat (i64 31) + %e = trunc <4 x i64> %d to <4 x i16> + ret <4 x i16> %e +} + +define <8 x i16> @lshr_mulhuw_v8i16_shift17(<8 x i32> %a, <8 x i32> %b) { +; SSE2-LABEL: lshr_mulhuw_v8i16_shift17: +; SSE2: # %bb.0: +; SSE2-NEXT: psrad $16, %xmm3 +; SSE2-NEXT: psrad $16, %xmm2 +; SSE2-NEXT: packssdw %xmm3, %xmm2 +; SSE2-NEXT: psrad $16, %xmm1 +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: packssdw %xmm1, %xmm0 +; SSE2-NEXT: pmulhuw %xmm2, %xmm0 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: lshr_mulhuw_v8i16_shift17: +; SSE41: # %bb.0: +; SSE41-NEXT: psrld $16, %xmm1 +; SSE41-NEXT: psrld $16, %xmm0 +; SSE41-NEXT: packusdw %xmm1, %xmm0 +; SSE41-NEXT: psrld $16, %xmm3 +; SSE41-NEXT: psrld $16, %xmm2 +; SSE41-NEXT: packusdw %xmm3, %xmm2 +; SSE41-NEXT: pmulhuw %xmm2, %xmm0 +; SSE41-NEXT: psrlw $1, %xmm0 +; SSE41-NEXT: retq +; +; AVX2-LABEL: lshr_mulhuw_v8i16_shift17: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: lshr_mulhuw_v8i16_shift17: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX512-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX512-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %a1 = lshr <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %b1 = lshr <8 x i32> %b, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %c = mul <8 x i32> %a1, %b1 + %d = lshr <8 x i32> %c, splat (i32 17) + %e = trunc <8 x i32> %d to <8 x i16> + ret <8 x i16> %e +} + +define <8 x i16> @lshr_mulhuw_v8i16_shift24(<8 x i32> %a, <8 x i32> %b) { +; SSE2-LABEL: lshr_mulhuw_v8i16_shift24: +; SSE2: # %bb.0: +; SSE2-NEXT: psrad $16, %xmm3 +; SSE2-NEXT: psrad $16, %xmm2 +; SSE2-NEXT: packssdw %xmm3, %xmm2 +; SSE2-NEXT: psrad $16, %xmm1 +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: packssdw %xmm1, %xmm0 +; SSE2-NEXT: pmulhuw %xmm2, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: lshr_mulhuw_v8i16_shift24: +; SSE41: # %bb.0: +; SSE41-NEXT: psrld $16, %xmm1 +; SSE41-NEXT: psrld $16, %xmm0 +; SSE41-NEXT: packusdw %xmm1, %xmm0 +; SSE41-NEXT: psrld $16, %xmm3 +; SSE41-NEXT: psrld $16, %xmm2 +; SSE41-NEXT: packusdw %xmm3, %xmm2 +; SSE41-NEXT: pmulhuw %xmm2, %xmm0 +; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: retq +; +; AVX2-LABEL: lshr_mulhuw_v8i16_shift24: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; 
AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: lshr_mulhuw_v8i16_shift24: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX512-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX512-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %a1 = lshr <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %b1 = lshr <8 x i32> %b, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %c = mul <8 x i32> %a1, %b1 + %d = lshr <8 x i32> %c, splat (i32 24) + %e = trunc <8 x i32> %d to <8 x i16> + ret <8 x i16> %e +} + +define <8 x i16> @lshr_mulhuw_v8i16_shift31(<8 x i32> %a, <8 x i32> %b) { +; SSE2-LABEL: lshr_mulhuw_v8i16_shift31: +; SSE2: # %bb.0: +; SSE2-NEXT: psrad $16, %xmm3 +; SSE2-NEXT: psrad $16, %xmm2 +; SSE2-NEXT: packssdw %xmm3, %xmm2 +; SSE2-NEXT: psrad $16, %xmm1 +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: packssdw %xmm1, %xmm0 +; SSE2-NEXT: pmulhuw %xmm2, %xmm0 +; SSE2-NEXT: psrlw $15, %xmm0 +; SSE2-NEXT: retq +; +; SSE41-LABEL: lshr_mulhuw_v8i16_shift31: +; SSE41: # %bb.0: +; SSE41-NEXT: psrld $16, %xmm1 +; SSE41-NEXT: psrld $16, %xmm0 +; SSE41-NEXT: packusdw %xmm1, %xmm0 +; SSE41-NEXT: psrld $16, %xmm3 +; SSE41-NEXT: psrld $16, %xmm2 +; SSE41-NEXT: packusdw %xmm3, %xmm2 +; SSE41-NEXT: pmulhuw %xmm2, %xmm0 +; SSE41-NEXT: psrlw $15, %xmm0 +; SSE41-NEXT: retq +; +; AVX2-LABEL: lshr_mulhuw_v8i16_shift31: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: lshr_mulhuw_v8i16_shift31: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX512-NEXT: vpsrld $16, %ymm1, %ymm1 +; AVX512-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %a1 = lshr <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %b1 = lshr <8 x i32> %b, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %c = mul <8 x i32> %a1, %b1 + %d = lshr <8 x i32> %c, splat (i32 31) + %e = trunc <8 x i32> %d to <8 x i16> + ret <8 x i16> %e +} + +define <16 x i16> @and_mulhuw_v16i16_shift17(<16 x i32> %a, <16 x i32> %b) { +; SSE2-LABEL: and_mulhuw_v16i16_shift17: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [32767,32767,32767,32767] +; SSE2-NEXT: pand %xmm8, %xmm3 +; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: packssdw %xmm3, %xmm2 +; SSE2-NEXT: pand %xmm8, %xmm1 +; SSE2-NEXT: pand %xmm8, %xmm0 +; SSE2-NEXT: packssdw %xmm1, %xmm0 +; SSE2-NEXT: pand %xmm8, %xmm7 +; SSE2-NEXT: pand %xmm8, %xmm6 +; SSE2-NEXT: packssdw %xmm7, %xmm6 +; SSE2-NEXT: pmulhw %xmm2, %xmm6 +; SSE2-NEXT: pand %xmm8, %xmm5 +; SSE2-NEXT: pand %xmm4, %xmm8 +; SSE2-NEXT: packssdw %xmm5, %xmm8 +; SSE2-NEXT: pmulhw %xmm8, %xmm0 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa %xmm6, %xmm1 +; SSE2-NEXT: retq +; +; SSE41-LABEL: and_mulhuw_v16i16_shift17: +; SSE41: # %bb.0: +; SSE41-NEXT: pmovsxwd {{.*#+}} xmm8 = [32767,32767,32767,32767] +; SSE41-NEXT: pand %xmm8, %xmm3 +; SSE41-NEXT: pand %xmm8, %xmm2 +; 
SSE41-NEXT: packusdw %xmm3, %xmm2 +; SSE41-NEXT: pand %xmm8, %xmm1 +; SSE41-NEXT: pand %xmm8, %xmm0 +; SSE41-NEXT: packusdw %xmm1, %xmm0 +; SSE41-NEXT: pand %xmm8, %xmm7 +; SSE41-NEXT: pand %xmm8, %xmm6 +; SSE41-NEXT: packusdw %xmm7, %xmm6 +; SSE41-NEXT: pmulhw %xmm2, %xmm6 +; SSE41-NEXT: pand %xmm8, %xmm5 +; SSE41-NEXT: pand %xmm4, %xmm8 +; SSE41-NEXT: packusdw %xmm5, %xmm8 +; SSE41-NEXT: pmulhw %xmm8, %xmm0 +; SSE41-NEXT: psrlw $1, %xmm0 +; SSE41-NEXT: psrlw $1, %xmm6 +; SSE41-NEXT: movdqa %xmm6, %xmm1 +; SSE41-NEXT: retq +; +; AVX2-LABEL: and_mulhuw_v16i16_shift17: +; AVX2: # %bb.0: +; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm4 = [32767,32767,32767,32767,32767,32767,32767,32767] +; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0 +; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1 +; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX2-NEXT: vpmulhuw %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm2 +; AVX2-NEXT: vpmulhuw %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: and_mulhuw_v16i16_shift17: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovdw %zmm1, %ymm1 +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm2 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767] +; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 +; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: and_mulhuw_v16i16_shift17: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpbroadcastd {{.*#+}} zmm2 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767] +; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandd %zmm2, %zmm1, %zmm1 +; AVX512BW-NEXT: vpmulhuw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512BW-NEXT: vpsrlw $1, %ymm0, %ymm0 +; AVX512BW-NEXT: retq + %a1 = and <16 x i32> %a, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767> + %b1 = and <16 x i32> %b, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767> + %c = mul <16 x i32> %a1, %b1 + %d = lshr <16 x i32> %c, splat (i32 17) + %e = trunc <16 x i32> %d to <16 x i16> + ret <16 x i16> %e +} + +define <16 x i16> @and_mulhuw_v16i16_shift24(<16 x i32> %a, <16 x i32> %b) { +; SSE2-LABEL: and_mulhuw_v16i16_shift24: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [32767,32767,32767,32767] +; SSE2-NEXT: pand %xmm8, %xmm3 +; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: packssdw %xmm3, %xmm2 +; SSE2-NEXT: pand %xmm8, %xmm1 +; SSE2-NEXT: pand %xmm8, %xmm0 +; SSE2-NEXT: packssdw %xmm1, %xmm0 +; SSE2-NEXT: pand %xmm8, %xmm7 +; SSE2-NEXT: pand %xmm8, %xmm6 +; SSE2-NEXT: packssdw %xmm7, %xmm6 +; SSE2-NEXT: pmulhw %xmm2, %xmm6 +; SSE2-NEXT: pand %xmm8, %xmm5 +; SSE2-NEXT: pand %xmm4, %xmm8 +; SSE2-NEXT: packssdw %xmm5, %xmm8 +; SSE2-NEXT: pmulhw %xmm8, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm6 +; SSE2-NEXT: movdqa %xmm6, %xmm1 +; SSE2-NEXT: retq +; +; SSE41-LABEL: and_mulhuw_v16i16_shift24: +; SSE41: # %bb.0: +; SSE41-NEXT: pmovsxwd {{.*#+}} xmm8 = [32767,32767,32767,32767] +; SSE41-NEXT: pand %xmm8, %xmm3 +; SSE41-NEXT: pand %xmm8, %xmm2 +; 
SSE41-NEXT: packusdw %xmm3, %xmm2 +; SSE41-NEXT: pand %xmm8, %xmm1 +; SSE41-NEXT: pand %xmm8, %xmm0 +; SSE41-NEXT: packusdw %xmm1, %xmm0 +; SSE41-NEXT: pand %xmm8, %xmm7 +; SSE41-NEXT: pand %xmm8, %xmm6 +; SSE41-NEXT: packusdw %xmm7, %xmm6 +; SSE41-NEXT: pmulhw %xmm2, %xmm6 +; SSE41-NEXT: pand %xmm8, %xmm5 +; SSE41-NEXT: pand %xmm4, %xmm8 +; SSE41-NEXT: packusdw %xmm5, %xmm8 +; SSE41-NEXT: pmulhw %xmm8, %xmm0 +; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: psrlw $8, %xmm6 +; SSE41-NEXT: movdqa %xmm6, %xmm1 +; SSE41-NEXT: retq +; +; AVX2-LABEL: and_mulhuw_v16i16_shift24: +; AVX2: # %bb.0: +; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm4 = [32767,32767,32767,32767,32767,32767,32767,32767] +; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0 +; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1 +; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX2-NEXT: vpmulhuw %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm2 +; AVX2-NEXT: vpmulhuw %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: and_mulhuw_v16i16_shift24: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmovdw %zmm1, %ymm1 +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm2 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767] +; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1 +; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 +; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: and_mulhuw_v16i16_shift24: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpbroadcastd {{.*#+}} zmm2 = [32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767,32767] +; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandd %zmm2, %zmm1, %zmm1 +; AVX512BW-NEXT: vpmulhuw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512BW-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512BW-NEXT: retq + %a1 = and <16 x i32> %a, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767> + %b1 = and <16 x i32> %b, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767> + %c = mul <16 x i32> %a1, %b1 + %d = lshr <16 x i32> %c, splat (i32 24) + %e = trunc <16 x i32> %d to <16 x i16> + ret <16 x i16> %e +} + +define <16 x i16> @and_mulhuw_v16i16_shift31(<16 x i32> %a, <16 x i32> %b) { +; SSE-LABEL: and_mulhuw_v16i16_shift31: +; SSE: # %bb.0: +; SSE-NEXT: xorps %xmm0, %xmm0 +; SSE-NEXT: xorps %xmm1, %xmm1 +; SSE-NEXT: retq +; +; AVX-LABEL: and_mulhuw_v16i16_shift31: +; AVX: # %bb.0: +; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX-NEXT: retq + %a1 = and <16 x i32> %a, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767> + %b1 = and <16 x i32> %b, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767> + %c = mul <16 x i32> %a1, %b1 + %d = lshr <16 x i32> %c, splat (i32 31) + %e = trunc <16 x i32> %d to <16 x i16> + ret <16 x i16> %e +} diff --git a/llvm/test/CodeGen/X86/pr30284.ll b/llvm/test/CodeGen/X86/pr30284.ll index 
f4fb1b3..708f0f7 100644 --- a/llvm/test/CodeGen/X86/pr30284.ll +++ b/llvm/test/CodeGen/X86/pr30284.ll @@ -19,14 +19,12 @@ define void @f_f___un_3C_unf_3E_un_3C_unf_3E_(<16 x i1> %x) { ; CHECK-NEXT: vpmovsxbd %xmm0, %zmm0 ; CHECK-NEXT: vpslld $31, %zmm0, %zmm0 ; CHECK-NEXT: vpmovd2m %zmm0, %k1 -; CHECK-NEXT: vmovapd 0, %zmm0 -; CHECK-NEXT: vmovapd 64, %zmm1 -; CHECK-NEXT: vbroadcastsd {{.*#+}} zmm2 = [0,16,0,16,0,16,0,16,0,16,0,16,0,16,0,16] +; CHECK-NEXT: vpbroadcastq {{.*#+}} zmm0 = [0,16,0,16,0,16,0,16,0,16,0,16,0,16,0,16] +; CHECK-NEXT: vporq 64, %zmm0, %zmm1 +; CHECK-NEXT: vporq 0, %zmm0, %zmm0 ; CHECK-NEXT: kshiftrw $8, %k1, %k2 -; CHECK-NEXT: vorpd %zmm2, %zmm1, %zmm1 {%k2} -; CHECK-NEXT: vorpd %zmm2, %zmm0, %zmm0 {%k1} -; CHECK-NEXT: vmovapd %zmm0, 0 -; CHECK-NEXT: vmovapd %zmm1, 64 +; CHECK-NEXT: vmovdqa64 %zmm0, 0 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, 64 {%k2} ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retl %a_load22 = load <16 x i64>, ptr null, align 1 diff --git a/llvm/test/CodeGen/X86/pr38539.ll b/llvm/test/CodeGen/X86/pr38539.ll index b633c28a..4124553 100644 --- a/llvm/test/CodeGen/X86/pr38539.ll +++ b/llvm/test/CodeGen/X86/pr38539.ll @@ -23,7 +23,7 @@ define void @f() nounwind { ; X86-NEXT: pushl %esi ; X86-NEXT: andl $-16, %esp ; X86-NEXT: subl $160, %esp -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi +; X86-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movzbl (%eax), %eax diff --git a/llvm/test/CodeGen/X86/pr76416.ll b/llvm/test/CodeGen/X86/pr76416.ll deleted file mode 100644 index 68e9ef9..0000000 --- a/llvm/test/CodeGen/X86/pr76416.ll +++ /dev/null @@ -1,79 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s | FileCheck %s - -target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" -target triple = "x86_64-unknown-linux-gnu" - -; -; Reproducer from https://github.com/llvm/llvm-project/issues/76416 -; - -@load_p = external global ptr, align 8 -@load_data = external global i8, align 1 - -define dso_local void @pr76416() { -; CHECK-LABEL: pr76416: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: jg .LBB0_3 -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: .LBB0_2: # %for.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: xorl %eax, %eax -; CHECK-NEXT: #APP -; CHECK-NEXT: #NO_APP -; CHECK-NEXT: incl -{{[0-9]+}}(%rsp) -; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: jle .LBB0_2 -; CHECK-NEXT: .LBB0_3: # %for.end -; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: movq load_p@GOTPCREL(%rip), %rax -; CHECK-NEXT: movq load_data@GOTPCREL(%rip), %rcx -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: .LBB0_4: # %for.cond1 -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: #APP -; CHECK-NEXT: #NO_APP -; CHECK-NEXT: movq (%rax), %rdx -; CHECK-NEXT: movslq -{{[0-9]+}}(%rsp), %rsi -; CHECK-NEXT: movzbl (%rdx,%rsi), %edx -; CHECK-NEXT: movb %dl, (%rcx) -; CHECK-NEXT: leal 1(%rsi), %edx -; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp) -; CHECK-NEXT: jmp .LBB0_4 -entry: - %alloca = alloca i32, align 4 - store i32 0, ptr %alloca, align 4 - br label %for.cond - -for.cond: ; preds = %for.body, %entry - %load.from.alloca.0 = load i32, ptr %alloca, align 4 - %cmp = icmp slt i32 %load.from.alloca.0, 4 - br i1 %cmp, label %for.body, label %for.end - -for.body: ; preds = %for.cond - call void asm sideeffect 
"", "{ax},~{dirflag},~{fpsr},~{flags}"(i8 0) nounwind - %load.from.alloca.1 = load i32, ptr %alloca, align 4 - %inc = add nsw i32 %load.from.alloca.1, 1 - store i32 %inc, ptr %alloca, align 4 - br label %for.cond - -for.end: ; preds = %for.cond - store i32 0, ptr %alloca, align 4 - br label %for.cond1 - -for.cond1: ; preds = %for.cond1, %for.end - call void asm sideeffect "", "N{dx},~{dirflag},~{fpsr},~{flags}"(i32 poison) nounwind - %load.from.load_p = load ptr, ptr @load_p, align 8 - %regs = getelementptr inbounds { [4 x i8] }, ptr %load.from.load_p, i32 0, i32 0 - %load.from.alloca.2 = load i32, ptr %alloca, align 4 - %idxprom = sext i32 %load.from.alloca.2 to i64 - %arrayidx = getelementptr inbounds [4 x i8], ptr %regs, i64 0, i64 %idxprom - %load.with.gep.ptr = load i8, ptr %arrayidx, align 1 - store i8 %load.with.gep.ptr, ptr @load_data, align 1 - %load.from.alloca.3 = load i32, ptr %alloca, align 4 - %inc2 = add nsw i32 %load.from.alloca.3, 1 - store i32 %inc2, ptr %alloca, align 4 - br label %for.cond1 -} diff --git a/llvm/test/CodeGen/X86/pr78897.ll b/llvm/test/CodeGen/X86/pr78897.ll index 4613c2b..db77baa 100644 --- a/llvm/test/CodeGen/X86/pr78897.ll +++ b/llvm/test/CodeGen/X86/pr78897.ll @@ -22,7 +22,7 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind { ; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: pxor %xmm0, %xmm0 ; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0 -; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u] +; X86-SSE2-NEXT: movq {{.*#+}} xmm1 = [17,17,17,17,17,17,17,17,0,0,0,0,0,0,0,0] ; X86-SSE2-NEXT: pand %xmm0, %xmm1 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1] ; X86-SSE2-NEXT: movd %xmm2, %esi diff --git a/llvm/test/CodeGen/X86/sibcall.ll b/llvm/test/CodeGen/X86/sibcall.ll index 4a0a68e..2759a98 100644 --- a/llvm/test/CodeGen/X86/sibcall.ll +++ b/llvm/test/CodeGen/X86/sibcall.ll @@ -444,21 +444,11 @@ define dso_local void @t15(ptr noalias sret(%struct.foo) %agg.result) nounwind ; ; X64-LABEL: t15: ; X64: # %bb.0: -; X64-NEXT: pushq %rbx -; X64-NEXT: movq %rdi, %rbx -; X64-NEXT: callq f -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: popq %rbx -; X64-NEXT: retq +; X64-NEXT: jmp f # TAILCALL ; ; X32-LABEL: t15: ; X32: # %bb.0: -; X32-NEXT: pushq %rbx -; X32-NEXT: movq %rdi, %rbx -; X32-NEXT: callq f -; X32-NEXT: movl %ebx, %eax -; X32-NEXT: popq %rbx -; X32-NEXT: retq +; X32-NEXT: jmp f # TAILCALL tail call fastcc void @f(ptr noalias sret(%struct.foo) %agg.result) nounwind ret void } @@ -607,32 +597,15 @@ declare dso_local fastcc double @foo20(double) nounwind define fastcc void @t21_sret_to_sret(ptr noalias sret(%struct.foo) %agg.result) nounwind { ; X86-LABEL: t21_sret_to_sret: ; X86: # %bb.0: -; X86-NEXT: pushl %esi -; X86-NEXT: subl $8, %esp -; X86-NEXT: movl %ecx, %esi -; X86-NEXT: calll t21_f_sret -; X86-NEXT: movl %esi, %eax -; X86-NEXT: addl $8, %esp -; X86-NEXT: popl %esi -; X86-NEXT: retl +; X86-NEXT: jmp t21_f_sret # TAILCALL ; ; X64-LABEL: t21_sret_to_sret: ; X64: # %bb.0: -; X64-NEXT: pushq %rbx -; X64-NEXT: movq %rdi, %rbx -; X64-NEXT: callq t21_f_sret -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: popq %rbx -; X64-NEXT: retq +; X64-NEXT: jmp t21_f_sret # TAILCALL ; ; X32-LABEL: t21_sret_to_sret: ; X32: # %bb.0: -; X32-NEXT: pushq %rbx -; X32-NEXT: movq %rdi, %rbx -; X32-NEXT: callq t21_f_sret -; X32-NEXT: movl %ebx, %eax -; X32-NEXT: popq %rbx -; X32-NEXT: retq +; X32-NEXT: jmp t21_f_sret # TAILCALL tail call fastcc void @t21_f_sret(ptr noalias sret(%struct.foo) %agg.result) nounwind ret 
void } @@ -640,34 +613,15 @@ define fastcc void @t21_sret_to_sret(ptr noalias sret(%struct.foo) %agg.result) define fastcc void @t21_sret_to_sret_more_args(ptr noalias sret(%struct.foo) %agg.result, i32 %a, i32 %b) nounwind { ; X86-LABEL: t21_sret_to_sret_more_args: ; X86: # %bb.0: -; X86-NEXT: pushl %esi -; X86-NEXT: subl $8, %esp -; X86-NEXT: movl %ecx, %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl %eax, (%esp) -; X86-NEXT: calll f_sret@PLT -; X86-NEXT: movl %esi, %eax -; X86-NEXT: addl $8, %esp -; X86-NEXT: popl %esi -; X86-NEXT: retl +; X86-NEXT: jmp f_sret@PLT # TAILCALL ; ; X64-LABEL: t21_sret_to_sret_more_args: ; X64: # %bb.0: -; X64-NEXT: pushq %rbx -; X64-NEXT: movq %rdi, %rbx -; X64-NEXT: callq f_sret@PLT -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: popq %rbx -; X64-NEXT: retq +; X64-NEXT: jmp f_sret@PLT # TAILCALL ; ; X32-LABEL: t21_sret_to_sret_more_args: ; X32: # %bb.0: -; X32-NEXT: pushq %rbx -; X32-NEXT: movq %rdi, %rbx -; X32-NEXT: callq f_sret@PLT -; X32-NEXT: movl %ebx, %eax -; X32-NEXT: popq %rbx -; X32-NEXT: retq +; X32-NEXT: jmp f_sret@PLT # TAILCALL tail call fastcc void @f_sret(ptr noalias sret(%struct.foo) %agg.result, i32 %a, i32 %b) nounwind ret void } @@ -675,35 +629,18 @@ define fastcc void @t21_sret_to_sret_more_args(ptr noalias sret(%struct.foo) %ag define fastcc void @t21_sret_to_sret_second_arg_sret(ptr noalias %agg.result, ptr noalias sret(%struct.foo) %ret) nounwind { ; X86-LABEL: t21_sret_to_sret_second_arg_sret: ; X86: # %bb.0: -; X86-NEXT: pushl %esi -; X86-NEXT: subl $8, %esp -; X86-NEXT: movl %edx, %esi ; X86-NEXT: movl %edx, %ecx -; X86-NEXT: calll t21_f_sret -; X86-NEXT: movl %esi, %eax -; X86-NEXT: addl $8, %esp -; X86-NEXT: popl %esi -; X86-NEXT: retl +; X86-NEXT: jmp t21_f_sret # TAILCALL ; ; X64-LABEL: t21_sret_to_sret_second_arg_sret: ; X64: # %bb.0: -; X64-NEXT: pushq %rbx -; X64-NEXT: movq %rsi, %rbx ; X64-NEXT: movq %rsi, %rdi -; X64-NEXT: callq t21_f_sret -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: popq %rbx -; X64-NEXT: retq +; X64-NEXT: jmp t21_f_sret # TAILCALL ; ; X32-LABEL: t21_sret_to_sret_second_arg_sret: ; X32: # %bb.0: -; X32-NEXT: pushq %rbx -; X32-NEXT: movq %rsi, %rbx ; X32-NEXT: movq %rsi, %rdi -; X32-NEXT: callq t21_f_sret -; X32-NEXT: movl %ebx, %eax -; X32-NEXT: popq %rbx -; X32-NEXT: retq +; X32-NEXT: jmp t21_f_sret # TAILCALL tail call fastcc void @t21_f_sret(ptr noalias sret(%struct.foo) %ret) nounwind ret void } @@ -725,27 +662,17 @@ define fastcc void @t21_sret_to_sret_more_args2(ptr noalias sret(%struct.foo) %a ; ; X64-LABEL: t21_sret_to_sret_more_args2: ; X64: # %bb.0: -; X64-NEXT: pushq %rbx ; X64-NEXT: movl %esi, %eax -; X64-NEXT: movq %rdi, %rbx ; X64-NEXT: movl %edx, %esi ; X64-NEXT: movl %eax, %edx -; X64-NEXT: callq f_sret@PLT -; X64-NEXT: movq %rbx, %rax -; X64-NEXT: popq %rbx -; X64-NEXT: retq +; X64-NEXT: jmp f_sret@PLT # TAILCALL ; ; X32-LABEL: t21_sret_to_sret_more_args2: ; X32: # %bb.0: -; X32-NEXT: pushq %rbx ; X32-NEXT: movl %esi, %eax -; X32-NEXT: movq %rdi, %rbx ; X32-NEXT: movl %edx, %esi ; X32-NEXT: movl %eax, %edx -; X32-NEXT: callq f_sret@PLT -; X32-NEXT: movl %ebx, %eax -; X32-NEXT: popq %rbx -; X32-NEXT: retq +; X32-NEXT: jmp f_sret@PLT # TAILCALL tail call fastcc void @f_sret(ptr noalias sret(%struct.foo) %agg.result, i32 %b, i32 %a) nounwind ret void } @@ -977,6 +904,176 @@ define ccc void @t22_non_sret_to_sret(ptr %agg.result) nounwind { ret void } +; Not tailcallable, caller and callee have different return types. 
+define void @t23_sret_to_non_sret(ptr noalias sret(%struct.foo) align 4 %agg.result, ptr %arg) { +; X86-LABEL: t23_sret_to_non_sret: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: .cfi_def_cfa_offset 8 +; X86-NEXT: subl $8, %esp +; X86-NEXT: .cfi_def_cfa_offset 16 +; X86-NEXT: .cfi_offset %esi, -8 +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl %eax, (%esp) +; X86-NEXT: calll callee_1@PLT +; X86-NEXT: movl %esi, %eax +; X86-NEXT: addl $8, %esp +; X86-NEXT: .cfi_def_cfa_offset 8 +; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl $4 +; +; X64-LABEL: t23_sret_to_non_sret: +; X64: # %bb.0: +; X64-NEXT: pushq %rbx +; X64-NEXT: .cfi_def_cfa_offset 16 +; X64-NEXT: .cfi_offset %rbx, -16 +; X64-NEXT: movq %rdi, %rbx +; X64-NEXT: movq %rsi, %rdi +; X64-NEXT: callq callee_1@PLT +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: popq %rbx +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq +; +; X32-LABEL: t23_sret_to_non_sret: +; X32: # %bb.0: +; X32-NEXT: pushq %rbx +; X32-NEXT: .cfi_def_cfa_offset 16 +; X32-NEXT: .cfi_offset %rbx, -16 +; X32-NEXT: movq %rdi, %rbx +; X32-NEXT: movq %rsi, %rdi +; X32-NEXT: callq callee_1@PLT +; X32-NEXT: movl %ebx, %eax +; X32-NEXT: popq %rbx +; X32-NEXT: .cfi_def_cfa_offset 8 +; X32-NEXT: retq + tail call void @callee_1(ptr %arg) + ret void +} + +; Not tailcallable, caller and callee have the same return type, but different return values. +define void @t24_sret_to_sret_different_val(ptr noalias sret(%struct.foo) align 4 %agg.result, ptr %arg) { +; X86-LABEL: t24_sret_to_sret_different_val: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: .cfi_def_cfa_offset 8 +; X86-NEXT: subl $24, %esp +; X86-NEXT: .cfi_def_cfa_offset 32 +; X86-NEXT: .cfi_offset %esi, -8 +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: xorps %xmm0, %xmm0 +; X86-NEXT: movsd %xmm0, 8(%esi) +; X86-NEXT: movsd %xmm0, (%esi) +; X86-NEXT: leal {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl %eax, (%esp) +; X86-NEXT: calll callee_2@PLT +; X86-NEXT: subl $4, %esp +; X86-NEXT: movl %esi, %eax +; X86-NEXT: addl $24, %esp +; X86-NEXT: .cfi_def_cfa_offset 8 +; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl $4 +; +; X64-LABEL: t24_sret_to_sret_different_val: +; X64: # %bb.0: +; X64-NEXT: pushq %rbx +; X64-NEXT: .cfi_def_cfa_offset 16 +; X64-NEXT: subq $16, %rsp +; X64-NEXT: .cfi_def_cfa_offset 32 +; X64-NEXT: .cfi_offset %rbx, -16 +; X64-NEXT: movq %rdi, %rbx +; X64-NEXT: movq $0, 8(%rdi) +; X64-NEXT: movq $0, (%rdi) +; X64-NEXT: movq %rsp, %rdi +; X64-NEXT: callq callee_2@PLT +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: addq $16, %rsp +; X64-NEXT: .cfi_def_cfa_offset 16 +; X64-NEXT: popq %rbx +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq +; +; X32-LABEL: t24_sret_to_sret_different_val: +; X32: # %bb.0: +; X32-NEXT: pushq %rbx +; X32-NEXT: .cfi_def_cfa_offset 16 +; X32-NEXT: subl $16, %esp +; X32-NEXT: .cfi_def_cfa_offset 32 +; X32-NEXT: .cfi_offset %rbx, -16 +; X32-NEXT: movq %rdi, %rbx +; X32-NEXT: movq $0, 8(%ebx) +; X32-NEXT: movq $0, (%ebx) +; X32-NEXT: movl %esp, %edi +; X32-NEXT: callq callee_2@PLT +; X32-NEXT: movl %ebx, %eax +; X32-NEXT: addl $16, %esp +; X32-NEXT: .cfi_def_cfa_offset 16 +; X32-NEXT: popq %rbx +; X32-NEXT: .cfi_def_cfa_offset 8 +; X32-NEXT: retq + %callee.return = alloca %struct.foo, align 4 + tail call void @llvm.memset.p0.i64(ptr align 4 %agg.result, i8 0, i64 16, i1 false) + tail call void @callee_2(ptr sret(%struct.foo) align 4 %callee.return) + ret void +} + +; Not 
tailcallable, caller and callee have the same return type, but different return values. +define void @t25_sret_to_sret_different_val(ptr noalias sret(%struct.foo) align 8 %agg.result, ptr %arg) { +; X86-LABEL: t25_sret_to_sret_different_val: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: .cfi_def_cfa_offset 8 +; X86-NEXT: subl $8, %esp +; X86-NEXT: .cfi_def_cfa_offset 16 +; X86-NEXT: .cfi_offset %esi, -8 +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl %eax, (%esp) +; X86-NEXT: calll callee_2@PLT +; X86-NEXT: subl $4, %esp +; X86-NEXT: movl %esi, %eax +; X86-NEXT: addl $8, %esp +; X86-NEXT: .cfi_def_cfa_offset 8 +; X86-NEXT: popl %esi +; X86-NEXT: .cfi_def_cfa_offset 4 +; X86-NEXT: retl $4 +; +; X64-LABEL: t25_sret_to_sret_different_val: +; X64: # %bb.0: +; X64-NEXT: pushq %rbx +; X64-NEXT: .cfi_def_cfa_offset 16 +; X64-NEXT: .cfi_offset %rbx, -16 +; X64-NEXT: movq %rdi, %rbx +; X64-NEXT: movq %rsi, %rdi +; X64-NEXT: callq callee_2@PLT +; X64-NEXT: movq %rbx, %rax +; X64-NEXT: popq %rbx +; X64-NEXT: .cfi_def_cfa_offset 8 +; X64-NEXT: retq +; +; X32-LABEL: t25_sret_to_sret_different_val: +; X32: # %bb.0: +; X32-NEXT: pushq %rbx +; X32-NEXT: .cfi_def_cfa_offset 16 +; X32-NEXT: .cfi_offset %rbx, -16 +; X32-NEXT: movq %rdi, %rbx +; X32-NEXT: movq %rsi, %rdi +; X32-NEXT: callq callee_2@PLT +; X32-NEXT: movl %ebx, %eax +; X32-NEXT: popq %rbx +; X32-NEXT: .cfi_def_cfa_offset 8 +; X32-NEXT: retq + tail call void @callee_2(ptr sret(%struct.foo) align 8 %arg) + ret void +} + +declare void @llvm.memset.p0.i64(ptr, i8, i64, i1) +declare void @callee_1(ptr) +declare void @callee_2(ptr noalias sret(%struct.foo)) + declare dso_local fastcc void @t21_f_sret(ptr noalias sret(%struct.foo)) nounwind declare dso_local fastcc void @t21_f_sret2(ptr noalias sret(%struct.foo), ptr noalias) nounwind declare dso_local fastcc void @t21_f_non_sret(ptr) nounwind diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll index 5cd604c..a260b32 100644 --- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll +++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll @@ -410,34 +410,34 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 { define <4 x float> @v4f32_estimate2(<4 x float> %x) #5 { ; SSE-LABEL: v4f32_estimate2: ; SSE: # %bb.0: -; SSE-NEXT: rsqrtps %xmm0, %xmm2 -; SSE-NEXT: mulps %xmm0, %xmm2 -; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38] -; SSE-NEXT: cmpleps %xmm0, %xmm1 -; SSE-NEXT: andps %xmm2, %xmm1 -; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN] +; SSE-NEXT: andps %xmm0, %xmm1 +; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38] +; SSE-NEXT: cmpleps %xmm1, %xmm2 +; SSE-NEXT: rsqrtps %xmm0, %xmm1 +; SSE-NEXT: mulps %xmm1, %xmm0 +; SSE-NEXT: andps %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX1-LABEL: v4f32_estimate2: ; AVX1: # %bb.0: -; AVX1-NEXT: vrsqrtps %xmm0, %xmm1 -; AVX1-NEXT: vmulps %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38] -; AVX1-NEXT: vcmpleps %xmm0, %xmm2, %xmm0 -; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vcmpleps %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vrsqrtps %xmm0, %xmm2 +; AVX1-NEXT: vmulps %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: 
vandps %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; ; AVX512-LABEL: v4f32_estimate2: ; AVX512: # %bb.0: -; AVX512-NEXT: vrsqrtps %xmm0, %xmm1 -; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm1 -; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN] -; AVX512-NEXT: vandps %xmm2, %xmm0, %xmm0 +; AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN] +; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm1 ; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38] -; AVX512-NEXT: vcmpleps %xmm0, %xmm2, %xmm0 -; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vcmpleps %xmm1, %xmm2, %xmm1 +; AVX512-NEXT: vrsqrtps %xmm0, %xmm2 +; AVX512-NEXT: vmulps %xmm2, %xmm0, %xmm0 +; AVX512-NEXT: vandps %xmm0, %xmm1, %xmm0 ; AVX512-NEXT: retq %sqrt = tail call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %x) ret <4 x float> %sqrt diff --git a/llvm/test/CodeGen/X86/subreg-fail.mir b/llvm/test/CodeGen/X86/subreg-fail.mir index dc69071..c8146f0 100644 --- a/llvm/test/CodeGen/X86/subreg-fail.mir +++ b/llvm/test/CodeGen/X86/subreg-fail.mir @@ -14,8 +14,8 @@ tracksRegLiveness: true body: | bb.0: ; CHECK-LABEL: name: test1 - ; CHECK: undef [[MOV32rm:%[0-9]+]].sub_32bit:gr64_nosp = MOV32rm undef %1:gr64, 1, $noreg, 0, $noreg, implicit-def [[MOV32rm]] :: (volatile load (s32) from `ptr undef`) - ; CHECK-NEXT: undef [[MOV32rm1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rm undef %4:gr64, 1, $noreg, 0, $noreg, implicit-def [[MOV32rm1]] :: (volatile load (s32) from `ptr undef`) + ; CHECK: undef [[MOV32rm:%[0-9]+]].sub_32bit:gr64_nosp = MOV32rm undef %1:gr64, 1, $noreg, 0, $noreg :: (volatile load (s32) from `ptr undef`) + ; CHECK-NEXT: undef [[MOV32rm1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rm undef %4:gr64, 1, $noreg, 0, $noreg :: (volatile load (s32) from `ptr undef`) ; CHECK-NEXT: [[MOV32rm1:%[0-9]+]]:gr64_with_sub_8bit = SHL64ri [[MOV32rm1]], 32, implicit-def dead $eflags ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64_with_sub_8bit = LEA64r [[MOV32rm1]], 1, [[MOV32rm]], 256, $noreg ; CHECK-NEXT: [[LEA64r:%[0-9]+]]:gr64_with_sub_8bit = SHR64ri [[LEA64r]], 8, implicit-def dead $eflags diff --git a/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir b/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir deleted file mode 100644 index e4fb812..0000000 --- a/llvm/test/CodeGen/X86/subreg-to-reg-coalescing.mir +++ /dev/null @@ -1,451 +0,0 @@ -# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2 -# RUN: llc -mtriple=x86_64-- -run-pass=register-coalescer -o - %s | FileCheck %s --match-full-lines - -# We cannot lose the liveness of the high subregister of %1 when -# coalesced with %0, so introduce an implicit-def of the super -# register on the MOV. 
- ---- -name: coalesce_mov32r0_into_subreg_to_reg64 -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64 - ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]] - ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi - ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %0:gr32 = MOV32r0 implicit-def dead $eflags - %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit - $rdi = COPY %1 - CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - -... - ---- -name: subreg_to_reg_folds_to_undef -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $rax - - ; CHECK-LABEL: name: subreg_to_reg_folds_to_undef - ; CHECK: liveins: $rax - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rax - ; CHECK-NEXT: undef [[MOV32rr:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32rr [[COPY]].sub_32bit, implicit-def [[MOV32rr]] - ; CHECK-NEXT: RET 0, implicit [[MOV32rr]] - %0:gr64 = COPY killed $rax - %1:gr32 = COPY killed %0.sub_32bit - %2:gr32 = MOV32rr killed %1 - %3:gr64 = SUBREG_TO_REG 0, killed %2, %subreg.sub_32bit - %4:gr64 = COPY killed %3 - RET 0, implicit %4 - -... 
- ---- -name: coalesce_mov32r0_subreg_def_into_subreg_to_reg64 -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - ; CHECK-LABEL: name: coalesce_mov32r0_subreg_def_into_subreg_to_reg64 - ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]] - ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi - ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - undef %0.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags - %1:gr64 = SUBREG_TO_REG 0, killed %0.sub_32bit, %subreg.sub_32bit - $rdi = COPY %1 - CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - -... - ---- -name: coalesce_mov32r0_into_subreg_def_with_super_def_to_reg64 -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_def_with_super_def_to_reg64 - ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]], implicit-def [[MOV32r0_]] - ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi, implicit-def $rdi - ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - undef %0.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def %0 - %1:gr64 = SUBREG_TO_REG 0, killed %0.sub_32bit, %subreg.sub_32bit - $rdi = COPY %1 - CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - -... 
- ---- -name: coalesce_mov32r0_into_subreg_to_reg64_already_defs_other_subreg -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_already_defs_other_subreg - ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def undef [[MOV32r0_]].sub_8bit, implicit-def [[MOV32r0_]] - ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit [[MOV32r0_]] - ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit undef $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %0:gr32 = MOV32r0 implicit-def dead $eflags, implicit-def undef %0.sub_8bit - %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit - INLINEASM &"", 0, implicit %1 - CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit undef $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - -... - - -# Reduced realistic case which was asserting after introducing new implicit-defs ---- -name: coalesce_needs_implicit_defs -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - ; CHECK-LABEL: name: coalesce_needs_implicit_defs - ; CHECK: bb.0: - ; CHECK-NEXT: successors: %bb.1(0x80000000) - ; CHECK-NEXT: liveins: $rdi - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi - ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_]], implicit-def [[MOV32r0_]] - ; CHECK-NEXT: undef [[MOV32r0_1:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def [[MOV32r0_1]], implicit-def [[MOV32r0_1]] - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.1: - ; CHECK-NEXT: successors: %bb.1(0x80000000) - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef [[MOV32r0_2:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags - ; CHECK-NEXT: TEST64rr [[MOV32r0_1]], [[MOV32r0_1]], implicit-def $eflags - ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]].sub_8bit:gr64_with_sub_8bit = SETCCr 4, implicit killed $eflags - ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi - ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]]:gr64_with_sub_8bit = SHL64ri [[MOV32r0_2]], 4, implicit-def dead $eflags - ; CHECK-NEXT: [[MOV32r0_2:%[0-9]+]]:gr64_with_sub_8bit = ADD64rr [[MOV32r0_2]], [[COPY]], implicit-def dead $eflags - ; CHECK-NEXT: [[MOV32r0_1:%[0-9]+]]:gr64_with_sub_8bit = COPY [[MOV32r0_2]] - ; CHECK-NEXT: JMP_1 %bb.1 - bb.0: - 
liveins: $rdi - - %0:gr64 = COPY killed $rdi - %1:gr32 = MOV32r0 implicit-def dead $eflags - %2:gr64 = SUBREG_TO_REG 0, %1, %subreg.sub_32bit - %3:gr64 = COPY killed %2 - - bb.1: - %4:gr64 = COPY killed %3 - %5:gr32 = MOV32r0 implicit-def dead $eflags - TEST64rr killed %4, %4, implicit-def $eflags - %6:gr8 = SETCCr 4, implicit killed $eflags - %7:gr32 = COPY killed %5 - %7.sub_8bit:gr32 = COPY killed %6 - %8:gr64 = SUBREG_TO_REG 0, killed %7, %subreg.sub_32bit - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %9:gr64 = SUBREG_TO_REG 0, %1, %subreg.sub_32bit - $rdi = COPY %9 - CALL64r killed %9, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %10:gr64 = COPY killed %8 - %10:gr64 = SHL64ri %10, 4, implicit-def dead $eflags - %11:gr64 = COPY killed %10 - %11:gr64 = ADD64rr %11, %0, implicit-def dead $eflags - %3:gr64 = COPY killed %11 - JMP_1 %bb.1 - -... - -# Make sure to add the 'undef' flag to the result register %2, -# because the top 32bits are not defined. ---- -name: coalesce_add_implicitdef_and_undef -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - ; CHECK-LABEL: name: coalesce_add_implicitdef_and_undef - ; CHECK: bb.0: - ; CHECK-NEXT: successors: %bb.1(0x80000000) - ; CHECK-NEXT: liveins: $eflags, $edx - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $edx - ; CHECK-NEXT: JMP_1 %bb.1 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.1: - ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = ADD32ri [[COPY]].sub_32bit, -34, implicit-def $eflags, implicit-def [[COPY]] - ; CHECK-NEXT: FAKE_USE [[COPY]] - ; CHECK-NEXT: RET 0 - bb.0: - liveins: $eflags, $edx - %0:gr32 = COPY $edx - JMP_1 %bb.1 - - bb.1: - %1:gr32 = COPY %0 - %1:gr32 = ADD32ri %1, -34, implicit-def $eflags - %2:gr64_with_sub_8bit = SUBREG_TO_REG 0, killed %1, %subreg.sub_32bit - FAKE_USE %2 - RET 0 -... - -# We can't mark the destination register as 'undef' or add implicit-def -# because the top 24 bits of %0:gr32 are retained by the SUBREG_TO_REG. -# -# For example, if this were to result in: -# -# undef %2.sub_32bit:gr64_with_sub_8bit = COPY $edx -# %1:gr8 = SETCCr 4, implicit $eflags -# JMP_1 %bb.1 -# -# bb.1: -# undef %2.sub_8bit:gr64_with_sub_8bit = COPY %1, implicit-def %2 -# -# Then this says that the top 56 bits of %2 are undef. That's not correct -# because only the top 32 bits are undef. 
---- -name: coalesce_dont_add_implicitdef_or_undef -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - ; CHECK-LABEL: name: coalesce_dont_add_implicitdef_or_undef - ; CHECK: bb.0: - ; CHECK-NEXT: successors: %bb.1(0x80000000) - ; CHECK-NEXT: liveins: $eflags, $edx - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $edx - ; CHECK-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags - ; CHECK-NEXT: JMP_1 %bb.1 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.1: - ; CHECK-NEXT: [[COPY:%[0-9]+]].sub_8bit:gr64_with_sub_8bit = COPY [[SETCCr]] - ; CHECK-NEXT: FAKE_USE [[COPY]] - ; CHECK-NEXT: RET 0 - bb.0: - liveins: $eflags, $edx - %0:gr32 = COPY $edx - %1:gr8 = SETCCr 4, implicit killed $eflags - JMP_1 %bb.1 - - bb.1: - %0.sub_8bit:gr32 = COPY %1 - %2:gr64_with_sub_8bit = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit - FAKE_USE %2 - RET 0 -... - ---- -name: coalesce_mov32r0_into_subreg_to_reg64_physreg_def -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_physreg_def - ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: dead $edi = MOV32r0 implicit-def dead $eflags, implicit-def $rdi - ; CHECK-NEXT: CALL64r killed $rdi, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %0:gr32 = MOV32r0 implicit-def dead $eflags - $rdi = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit - CALL64r killed $rdi, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - -... 
- ---- -name: coalesce_mov32r0_into_subreg_to_reg64_physreg_use -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $eax - ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_physreg_use - ; CHECK: liveins: $eax - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags - ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, $eax, %subreg.sub_32bit - ; CHECK-NEXT: $rdi = COPY [[SUBREG_TO_REG]] - ; CHECK-NEXT: CALL64r [[SUBREG_TO_REG]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - $eax = MOV32r0 implicit-def dead $eflags - %1:gr64 = SUBREG_TO_REG 0, killed $eax, %subreg.sub_32bit - $rdi = COPY %1 - CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - -... - -# Coalesced instruction is a copy with other implicit operands ---- -name: coalesce_copy_into_subreg_to_reg64 -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - liveins: $eax - ; CHECK-LABEL: name: coalesce_copy_into_subreg_to_reg64 - ; CHECK: liveins: $eax - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = COPY $eax, implicit-def dead $eflags, implicit-def [[COPY]] - ; CHECK-NEXT: $rdi = COPY [[COPY]] - ; CHECK-NEXT: CALL64r [[COPY]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %0:gr32 = COPY $eax, implicit-def dead $eflags - %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit - $rdi = COPY %1 - CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - -... 
- ---- -name: coalesce_mov32r0_into_subreg_to_reg64_multiple_redef_value -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - bb.0: - ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_multiple_redef_value - ; CHECK: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: undef [[MOV32r0_:%[0-9]+]].sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags - ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef [[MOV32r0_]].sub_32bit, implicit [[MOV32r0_]].sub_32bit, implicit-def [[MOV32r0_]] - ; CHECK-NEXT: $rdi = COPY [[MOV32r0_]] - ; CHECK-NEXT: CALL64r [[MOV32r0_]], csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - %0:gr32 = MOV32r0 implicit-def dead $eflags - INLINEASM &"", 0, implicit-def %0, implicit %0 - %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit - $rdi = COPY %1 - CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - -... - ---- -name: coalesce_mov32r0_into_subreg_to_reg64_def_is_block_liveout -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_def_is_block_liveout - ; CHECK: bb.0: - ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef %1.sub_32bit, implicit-def %1 - ; CHECK-NEXT: JCC_1 %bb.1, 4, implicit undef $eflags - ; CHECK-NEXT: JMP_1 %bb.2 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.1: - ; CHECK-NEXT: $rdi = COPY %1 - ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: RET 0 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.2: - bb.0: - INLINEASM &"", 0, implicit-def %0:gr32 - JCC_1 %bb.1, 4, implicit undef $eflags - JMP_1 %bb.2 - - bb.1: - %1:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit - $rdi = COPY %1 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - CALL64r killed %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - RET 0 - - bb.2: - -... 
- ---- -name: coalesce_mov32r0_into_subreg_to_reg64_def_is_phi_def -frameInfo: - adjustsStack: true -tracksRegLiveness: true -body: | - ; CHECK-LABEL: name: coalesce_mov32r0_into_subreg_to_reg64_def_is_phi_def - ; CHECK: bb.0: - ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, implicit-def undef %1.sub_32bit, implicit-def %1 - ; CHECK-NEXT: JCC_1 %bb.1, 4, implicit undef $eflags - ; CHECK-NEXT: JMP_1 %bb.2 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.1: - ; CHECK-NEXT: successors: %bb.1(0x80000000) - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $rdi = COPY %1 - ; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - ; CHECK-NEXT: JMP_1 %bb.1 - ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: bb.2: - bb.0: - - INLINEASM &"", 0, implicit-def %0:gr32 - JCC_1 %bb.1, 4, implicit undef $eflags - JMP_1 %bb.2 - - bb.1: - %1:gr64 = SUBREG_TO_REG 0, %0, %subreg.sub_32bit - $rdi = COPY %1 - ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - CALL64r %1, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax - ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp - JMP_1 %bb.1 - - bb.2: - -... diff --git a/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll b/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll index 5c5f704..6b07891 100644 --- a/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll +++ b/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll @@ -62,10 +62,11 @@ entry: define i32 @simplify_demanded_bits_drop_flag(i1 zeroext %x, i1 zeroext %y) nounwind { ; CHECK-LABEL: simplify_demanded_bits_drop_flag: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: negl %edi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi ; CHECK-NEXT: shll $2, %esi -; CHECK-NEXT: xorl %edi, %esi -; CHECK-NEXT: movslq %esi, %rax +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: negq %rax +; CHECK-NEXT: xorq %rsi, %rax ; CHECK-NEXT: imulq $-1634202141, %rax, %rax # imm = 0x9E980DE3 ; CHECK-NEXT: movq %rax, %rcx ; CHECK-NEXT: shrq $63, %rcx diff --git a/llvm/test/CodeGen/X86/ushl_sat_vec.ll b/llvm/test/CodeGen/X86/ushl_sat_vec.ll index ebb5e13..b8e83da 100644 --- a/llvm/test/CodeGen/X86/ushl_sat_vec.ll +++ b/llvm/test/CodeGen/X86/ushl_sat_vec.ll @@ -281,7 +281,7 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind { ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; X64-AVX2-NEXT: vpsllvd %ymm1, %ymm2, %ymm2 -; X64-AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u] +; X64-AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] ; X64-AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3] ; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = 
xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero ; X64-AVX2-NEXT: vpsrlvd %ymm1, %ymm3, %ymm1 diff --git a/llvm/test/CodeGen/X86/var-permute-128.ll b/llvm/test/CodeGen/X86/var-permute-128.ll index 7f4111e..6174011 100644 --- a/llvm/test/CodeGen/X86/var-permute-128.ll +++ b/llvm/test/CodeGen/X86/var-permute-128.ll @@ -501,39 +501,39 @@ define <8 x i16> @var_shuffle_zero_v8i16(<8 x i16> %v, <8 x i16> %indices) nounw ; SSE3-NEXT: pextrw $0, %xmm1, %eax ; SSE3-NEXT: pextrw $1, %xmm1, %ecx ; SSE3-NEXT: pextrw $2, %xmm1, %edx -; SSE3-NEXT: pextrw $3, %xmm1, %esi -; SSE3-NEXT: pextrw $4, %xmm1, %edi -; SSE3-NEXT: pextrw $5, %xmm1, %r8d -; SSE3-NEXT: pextrw $6, %xmm1, %r9d -; SSE3-NEXT: pextrw $7, %xmm1, %r10d +; SSE3-NEXT: pextrw $3, %xmm1, %edi +; SSE3-NEXT: pextrw $4, %xmm1, %r8d +; SSE3-NEXT: pextrw $5, %xmm1, %r9d +; SSE3-NEXT: pextrw $6, %xmm1, %r10d +; SSE3-NEXT: pextrw $7, %xmm1, %esi ; SSE3-NEXT: movdqa %xmm2, -24(%rsp) ; SSE3-NEXT: andl $7, %eax +; SSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax ; SSE3-NEXT: andl $7, %ecx +; SSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSE3-NEXT: andl $7, %edx -; SSE3-NEXT: andl $7, %esi +; SSE3-NEXT: movzwl -24(%rsp,%rdx,2), %edx ; SSE3-NEXT: andl $7, %edi +; SSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi ; SSE3-NEXT: andl $7, %r8d +; SSE3-NEXT: movzwl -24(%rsp,%r8,2), %r8d ; SSE3-NEXT: andl $7, %r9d +; SSE3-NEXT: movzwl -24(%rsp,%r9,2), %r9d ; SSE3-NEXT: andl $7, %r10d ; SSE3-NEXT: movzwl -24(%rsp,%r10,2), %r10d -; SSE3-NEXT: movd %r10d, %xmm1 -; SSE3-NEXT: movzwl -24(%rsp,%r9,2), %r9d -; SSE3-NEXT: movd %r9d, %xmm2 +; SSE3-NEXT: andl $7, %esi +; SSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi +; SSE3-NEXT: movd %esi, %xmm1 +; SSE3-NEXT: movd %r10d, %xmm2 ; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE3-NEXT: movzwl -24(%rsp,%r8,2), %r8d -; SSE3-NEXT: movd %r8d, %xmm1 -; SSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi -; SSE3-NEXT: movd %edi, %xmm3 +; SSE3-NEXT: movd %r9d, %xmm1 +; SSE3-NEXT: movd %r8d, %xmm3 ; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] ; SSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; SSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi -; SSE3-NEXT: movd %esi, %xmm1 -; SSE3-NEXT: movzwl -24(%rsp,%rdx,2), %edx +; SSE3-NEXT: movd %edi, %xmm1 ; SSE3-NEXT: movd %edx, %xmm2 ; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSE3-NEXT: movd %ecx, %xmm1 -; SSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax ; SSE3-NEXT: movd %eax, %xmm4 ; SSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] ; SSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] @@ -1053,8 +1053,9 @@ define <2 x double> @var_shuffle_zero_v2f64(<2 x double> %v, <2 x i64> %indices) ; SSE3-NEXT: movq %xmm1, %rcx ; SSE3-NEXT: andl $1, %ecx ; SSE3-NEXT: movaps %xmm0, -24(%rsp) -; SSE3-NEXT: movsd -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero -; SSE3-NEXT: movhps -24(%rsp,%rcx,8), %xmm0 # xmm0 = xmm0[0,1],mem[0,1] +; SSE3-NEXT: movq -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero +; SSE3-NEXT: movq -24(%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero +; SSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE3-NEXT: pandn %xmm0, %xmm2 ; SSE3-NEXT: movdqa %xmm2, %xmm0 ; SSE3-NEXT: retq @@ -1077,8 +1078,9 @@ define <2 x double> @var_shuffle_zero_v2f64(<2 x double> %v, <2 x i64> %indices) 
; SSSE3-NEXT: movq %xmm1, %rcx ; SSSE3-NEXT: andl $1, %ecx ; SSSE3-NEXT: movaps %xmm0, -24(%rsp) -; SSSE3-NEXT: movsd -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero -; SSSE3-NEXT: movhps -24(%rsp,%rcx,8), %xmm0 # xmm0 = xmm0[0,1],mem[0,1] +; SSSE3-NEXT: movq -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero +; SSSE3-NEXT: movq -24(%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSSE3-NEXT: pandn %xmm0, %xmm2 ; SSSE3-NEXT: movdqa %xmm2, %xmm0 ; SSSE3-NEXT: retq @@ -1251,16 +1253,16 @@ define <4 x float> @var_shuffle_zero_v4f32(<4 x float> %v, <4 x i32> %indices) n ; SSE3-NEXT: movd %xmm1, %esi ; SSE3-NEXT: movaps %xmm2, -24(%rsp) ; SSE3-NEXT: andl $3, %eax +; SSE3-NEXT: movd -24(%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero ; SSE3-NEXT: andl $3, %ecx +; SSE3-NEXT: movd -24(%rsp,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero ; SSE3-NEXT: andl $3, %edx +; SSE3-NEXT: movd -24(%rsp,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero ; SSE3-NEXT: andl $3, %esi -; SSE3-NEXT: movd -24(%rsp,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero -; SSE3-NEXT: movd -24(%rsp,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero -; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE3-NEXT: movd -24(%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero -; SSE3-NEXT: movd -24(%rsp,%rcx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero -; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] -; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE3-NEXT: movd -24(%rsp,%rsi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero +; SSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] ; SSE3-NEXT: pandn %xmm1, %xmm0 ; SSE3-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-bo-select.ll b/llvm/test/CodeGen/X86/vector-bo-select.ll index 11e7fe85..0e37e5a 100644 --- a/llvm/test/CodeGen/X86/vector-bo-select.ll +++ b/llvm/test/CodeGen/X86/vector-bo-select.ll @@ -468,29 +468,29 @@ define <16 x float> @fsub_v16f32_swap(<16 x i1> %b, <16 x float> noundef %x, <16 ; ; SSE42-LABEL: fsub_v16f32_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm8 ; SSE42-NEXT: psrad $31, %xmm8 -; SSE42-NEXT: pandn %xmm7, %xmm8 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8 +; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm9 +; SSE42-NEXT: psrad $31, %xmm9 +; SSE42-NEXT: pandn %xmm7, %xmm9 ; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm7 ; SSE42-NEXT: psrad $31, %xmm7 ; SSE42-NEXT: pandn %xmm6, %xmm7 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm6 -; SSE42-NEXT: psrad $31, %xmm6 -; SSE42-NEXT: pandn %xmm5, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = 
xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm0 ; SSE42-NEXT: psrad $31, %xmm0 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm0 -; SSE42-NEXT: subps %xmm6, %xmm1 +; SSE42-NEXT: pandn %xmm5, %xmm0 +; SSE42-NEXT: subps %xmm0, %xmm1 ; SSE42-NEXT: subps %xmm7, %xmm2 -; SSE42-NEXT: subps %xmm8, %xmm3 -; SSE42-NEXT: subps %xmm0, %xmm4 +; SSE42-NEXT: subps %xmm9, %xmm3 +; SSE42-NEXT: subps %xmm8, %xmm4 ; SSE42-NEXT: movaps %xmm1, %xmm0 ; SSE42-NEXT: movaps %xmm2, %xmm1 ; SSE42-NEXT: movaps %xmm3, %xmm2 @@ -562,33 +562,32 @@ define <16 x float> @fsub_v16f32_commute_swap(<16 x i1> %b, <16 x float> noundef ; ; SSE42-LABEL: fsub_v16f32_commute_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: movaps %xmm2, %xmm8 -; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm2 -; SSE42-NEXT: psrad $31, %xmm2 -; SSE42-NEXT: pandn %xmm7, %xmm2 +; SSE42-NEXT: movaps %xmm3, %xmm8 +; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm3 +; SSE42-NEXT: psrad $31, %xmm3 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3 +; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm9 +; SSE42-NEXT: psrad $31, %xmm9 +; SSE42-NEXT: pandn %xmm7, %xmm9 ; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm7 ; SSE42-NEXT: psrad $31, %xmm7 ; SSE42-NEXT: pandn %xmm6, %xmm7 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm6 -; SSE42-NEXT: psrad $31, %xmm6 -; SSE42-NEXT: pandn %xmm5, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm5 -; SSE42-NEXT: psrad $31, %xmm5 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5 -; SSE42-NEXT: subps %xmm1, %xmm6 -; SSE42-NEXT: subps %xmm8, %xmm7 -; SSE42-NEXT: subps %xmm3, %xmm2 -; SSE42-NEXT: subps %xmm4, %xmm5 -; SSE42-NEXT: movaps %xmm6, %xmm0 +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm0 +; SSE42-NEXT: psrad $31, %xmm0 +; SSE42-NEXT: pandn %xmm5, %xmm0 +; SSE42-NEXT: subps %xmm1, %xmm0 +; SSE42-NEXT: subps %xmm2, %xmm7 +; SSE42-NEXT: subps %xmm8, %xmm9 +; SSE42-NEXT: subps %xmm4, %xmm3 ; SSE42-NEXT: movaps %xmm7, %xmm1 -; SSE42-NEXT: movaps %xmm5, %xmm3 +; SSE42-NEXT: movaps %xmm9, %xmm2 ; SSE42-NEXT: retq ; ; AVX2-LABEL: fsub_v16f32_commute_swap: @@ -2407,29 +2406,29 @@ define <16 x i32> @sub_v16i32_swap(<16 x i1> %b, <16 x i32> noundef %x, <16 x i3 ; ; SSE42-LABEL: sub_v16i32_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = 
xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm8 ; SSE42-NEXT: psrad $31, %xmm8 -; SSE42-NEXT: pandn %xmm7, %xmm8 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8 +; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm9 +; SSE42-NEXT: psrad $31, %xmm9 +; SSE42-NEXT: pandn %xmm7, %xmm9 ; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm7 ; SSE42-NEXT: psrad $31, %xmm7 ; SSE42-NEXT: pandn %xmm6, %xmm7 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm6 -; SSE42-NEXT: psrad $31, %xmm6 -; SSE42-NEXT: pandn %xmm5, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm0 ; SSE42-NEXT: psrad $31, %xmm0 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm0 -; SSE42-NEXT: psubd %xmm6, %xmm1 +; SSE42-NEXT: pandn %xmm5, %xmm0 +; SSE42-NEXT: psubd %xmm0, %xmm1 ; SSE42-NEXT: psubd %xmm7, %xmm2 -; SSE42-NEXT: psubd %xmm8, %xmm3 -; SSE42-NEXT: psubd %xmm0, %xmm4 +; SSE42-NEXT: psubd %xmm9, %xmm3 +; SSE42-NEXT: psubd %xmm8, %xmm4 ; SSE42-NEXT: movdqa %xmm1, %xmm0 ; SSE42-NEXT: movdqa %xmm2, %xmm1 ; SSE42-NEXT: movdqa %xmm3, %xmm2 @@ -2501,33 +2500,32 @@ define <16 x i32> @sub_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x, ; ; SSE42-LABEL: sub_v16i32_commute_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: movdqa %xmm2, %xmm8 -; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm2 -; SSE42-NEXT: psrad $31, %xmm2 -; SSE42-NEXT: pandn %xmm7, %xmm2 +; SSE42-NEXT: movdqa %xmm3, %xmm8 +; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm3 +; SSE42-NEXT: psrad $31, %xmm3 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3 +; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm9 +; SSE42-NEXT: psrad $31, %xmm9 +; SSE42-NEXT: pandn %xmm7, %xmm9 ; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm7 ; SSE42-NEXT: psrad $31, %xmm7 ; SSE42-NEXT: pandn %xmm6, %xmm7 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm6 -; SSE42-NEXT: psrad $31, %xmm6 -; SSE42-NEXT: pandn %xmm5, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm5 -; SSE42-NEXT: psrad $31, %xmm5 -; SSE42-NEXT: pandn 
{{[0-9]+}}(%rsp), %xmm5 -; SSE42-NEXT: psubd %xmm1, %xmm6 -; SSE42-NEXT: psubd %xmm8, %xmm7 -; SSE42-NEXT: psubd %xmm3, %xmm2 -; SSE42-NEXT: psubd %xmm4, %xmm5 -; SSE42-NEXT: movdqa %xmm6, %xmm0 +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm0 +; SSE42-NEXT: psrad $31, %xmm0 +; SSE42-NEXT: pandn %xmm5, %xmm0 +; SSE42-NEXT: psubd %xmm1, %xmm0 +; SSE42-NEXT: psubd %xmm2, %xmm7 +; SSE42-NEXT: psubd %xmm8, %xmm9 +; SSE42-NEXT: psubd %xmm4, %xmm3 ; SSE42-NEXT: movdqa %xmm7, %xmm1 -; SSE42-NEXT: movdqa %xmm5, %xmm3 +; SSE42-NEXT: movdqa %xmm9, %xmm2 ; SSE42-NEXT: retq ; ; AVX2-LABEL: sub_v16i32_commute_swap: @@ -3371,41 +3369,41 @@ define <16 x i32> @shl_v16i32_swap(<16 x i1> %b, <16 x i32> noundef %x, <16 x i3 ; ; SSE42-LABEL: shl_v16i32_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm8 ; SSE42-NEXT: psrad $31, %xmm8 -; SSE42-NEXT: pandn %xmm7, %xmm8 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8 +; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm9 +; SSE42-NEXT: psrad $31, %xmm9 +; SSE42-NEXT: pandn %xmm7, %xmm9 ; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm7 ; SSE42-NEXT: psrad $31, %xmm7 ; SSE42-NEXT: pandn %xmm6, %xmm7 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm6 -; SSE42-NEXT: psrad $31, %xmm6 -; SSE42-NEXT: pandn %xmm5, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm5 -; SSE42-NEXT: psrad $31, %xmm5 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5 -; SSE42-NEXT: pslld $23, %xmm6 -; SSE42-NEXT: movdqa {{.*#+}} xmm9 = [1065353216,1065353216,1065353216,1065353216] -; SSE42-NEXT: paddd %xmm9, %xmm6 -; SSE42-NEXT: cvttps2dq %xmm6, %xmm0 +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm0 +; SSE42-NEXT: psrad $31, %xmm0 +; SSE42-NEXT: pandn %xmm5, %xmm0 +; SSE42-NEXT: pslld $23, %xmm0 +; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216] +; SSE42-NEXT: paddd %xmm5, %xmm0 +; SSE42-NEXT: cvttps2dq %xmm0, %xmm0 ; SSE42-NEXT: pmulld %xmm1, %xmm0 ; SSE42-NEXT: pslld $23, %xmm7 -; SSE42-NEXT: paddd %xmm9, %xmm7 +; SSE42-NEXT: paddd %xmm5, %xmm7 ; SSE42-NEXT: cvttps2dq %xmm7, %xmm1 ; SSE42-NEXT: pmulld %xmm2, %xmm1 -; SSE42-NEXT: pslld $23, %xmm8 -; SSE42-NEXT: paddd %xmm9, %xmm8 -; SSE42-NEXT: cvttps2dq %xmm8, %xmm2 +; SSE42-NEXT: pslld $23, %xmm9 +; SSE42-NEXT: paddd %xmm5, %xmm9 +; SSE42-NEXT: cvttps2dq %xmm9, %xmm2 ; SSE42-NEXT: pmulld %xmm3, %xmm2 -; SSE42-NEXT: pslld $23, %xmm5 -; SSE42-NEXT: paddd %xmm9, %xmm5 -; SSE42-NEXT: cvttps2dq %xmm5, %xmm3 +; SSE42-NEXT: pslld $23, %xmm8 +; SSE42-NEXT: paddd 
%xmm5, %xmm8 +; SSE42-NEXT: cvttps2dq %xmm8, %xmm3 ; SSE42-NEXT: pmulld %xmm4, %xmm3 ; SSE42-NEXT: retq ; @@ -3508,11 +3506,16 @@ define <16 x i32> @shl_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x, ; ; SSE42-LABEL: shl_v16i32_commute_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm8 ; SSE42-NEXT: psrad $31, %xmm8 -; SSE42-NEXT: pandn %xmm7, %xmm8 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8 +; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm9 +; SSE42-NEXT: psrad $31, %xmm9 +; SSE42-NEXT: pandn %xmm7, %xmm9 ; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm7 @@ -3522,28 +3525,23 @@ define <16 x i32> @shl_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x, ; SSE42-NEXT: pslld $31, %xmm6 ; SSE42-NEXT: psrad $31, %xmm6 ; SSE42-NEXT: pandn %xmm5, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm5 -; SSE42-NEXT: psrad $31, %xmm5 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5 ; SSE42-NEXT: pslld $23, %xmm1 -; SSE42-NEXT: movdqa {{.*#+}} xmm9 = [1065353216,1065353216,1065353216,1065353216] -; SSE42-NEXT: paddd %xmm9, %xmm1 +; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216] +; SSE42-NEXT: paddd %xmm5, %xmm1 ; SSE42-NEXT: cvttps2dq %xmm1, %xmm0 ; SSE42-NEXT: pmulld %xmm6, %xmm0 ; SSE42-NEXT: pslld $23, %xmm2 -; SSE42-NEXT: paddd %xmm9, %xmm2 +; SSE42-NEXT: paddd %xmm5, %xmm2 ; SSE42-NEXT: cvttps2dq %xmm2, %xmm1 ; SSE42-NEXT: pmulld %xmm7, %xmm1 ; SSE42-NEXT: pslld $23, %xmm3 -; SSE42-NEXT: paddd %xmm9, %xmm3 +; SSE42-NEXT: paddd %xmm5, %xmm3 ; SSE42-NEXT: cvttps2dq %xmm3, %xmm2 -; SSE42-NEXT: pmulld %xmm8, %xmm2 +; SSE42-NEXT: pmulld %xmm9, %xmm2 ; SSE42-NEXT: pslld $23, %xmm4 -; SSE42-NEXT: paddd %xmm9, %xmm4 +; SSE42-NEXT: paddd %xmm5, %xmm4 ; SSE42-NEXT: cvttps2dq %xmm4, %xmm3 -; SSE42-NEXT: pmulld %xmm5, %xmm3 +; SSE42-NEXT: pmulld %xmm8, %xmm3 ; SSE42-NEXT: retq ; ; AVX2-LABEL: shl_v16i32_commute_swap: @@ -4078,85 +4076,85 @@ define <16 x i32> @lshr_v16i32_swap(<16 x i1> %b, <16 x i32> noundef %x, <16 x i ; ; SSE42-LABEL: lshr_v16i32_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm8 ; SSE42-NEXT: psrad $31, %xmm8 -; SSE42-NEXT: pandn %xmm7, %xmm8 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8 +; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm9 +; SSE42-NEXT: psrad $31, %xmm9 +; SSE42-NEXT: pandn %xmm7, %xmm9 ; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = 
xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm7 ; SSE42-NEXT: psrad $31, %xmm7 ; SSE42-NEXT: pandn %xmm6, %xmm7 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm6 -; SSE42-NEXT: psrad $31, %xmm6 -; SSE42-NEXT: pandn %xmm5, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm5 -; SSE42-NEXT: psrad $31, %xmm5 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5 -; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm1, %xmm9 -; SSE42-NEXT: psrld %xmm0, %xmm9 -; SSE42-NEXT: pshufd {{.*#+}} xmm10 = xmm6[2,3,2,3] -; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[2,3,3,3,4,5,6,7] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm0 +; SSE42-NEXT: psrad $31, %xmm0 +; SSE42-NEXT: pandn %xmm5, %xmm0 +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm1, %xmm6 +; SSE42-NEXT: psrld %xmm5, %xmm6 +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3] +; SSE42-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, %xmm11 -; SSE42-NEXT: psrld %xmm0, %xmm11 -; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm9[0,1,2,3],xmm11[4,5,6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,1,4,5,6,7] +; SSE42-NEXT: psrld %xmm10, %xmm11 +; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm6[0,1,2,3],xmm11[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, %xmm0 ; SSE42-NEXT: psrld %xmm6, %xmm0 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[0,1,1,1,4,5,6,7] -; SSE42-NEXT: psrld %xmm6, %xmm1 +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] +; SSE42-NEXT: psrld %xmm5, %xmm1 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3],xmm0[4,5],xmm11[6,7] ; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm2, %xmm6 -; SSE42-NEXT: psrld %xmm1, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm7[2,3,2,3] -; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm2, %xmm5 +; SSE42-NEXT: psrld %xmm1, %xmm5 +; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,3,2,3] +; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm2, %xmm10 ; SSE42-NEXT: psrld %xmm1, %xmm10 -; SSE42-NEXT: pblendw {{.*#+}} xmm10 = xmm6[0,1,2,3],xmm10[4,5,6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm10 = xmm5[0,1,2,3],xmm10[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm2, %xmm1 -; SSE42-NEXT: psrld %xmm6, %xmm1 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[0,1,1,1,4,5,6,7] -; SSE42-NEXT: psrld %xmm6, %xmm2 +; SSE42-NEXT: psrld %xmm5, %xmm1 +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7] +; SSE42-NEXT: psrld %xmm5, %xmm2 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm10[2,3],xmm1[4,5],xmm10[6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm3, %xmm6 -; SSE42-NEXT: psrld %xmm2, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} 
xmm7 = xmm8[2,3,2,3] -; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm3, %xmm9 -; SSE42-NEXT: psrld %xmm2, %xmm9 -; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm6[0,1,2,3],xmm9[4,5,6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm9[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm3, %xmm5 +; SSE42-NEXT: psrld %xmm2, %xmm5 +; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm9[2,3,2,3] +; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm6[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm3, %xmm7 +; SSE42-NEXT: psrld %xmm2, %xmm7 +; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm3, %xmm2 -; SSE42-NEXT: psrld %xmm6, %xmm2 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,1,1,1,4,5,6,7] -; SSE42-NEXT: psrld %xmm6, %xmm3 +; SSE42-NEXT: psrld %xmm5, %xmm2 +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7] +; SSE42-NEXT: psrld %xmm5, %xmm3 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3],xmm2[4,5],xmm9[6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm4, %xmm6 -; SSE42-NEXT: psrld %xmm3, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3] -; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm4, %xmm8 -; SSE42-NEXT: psrld %xmm3, %xmm8 -; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm6[0,1,2,3],xmm8[4,5,6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,3],xmm2[4,5],xmm7[6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm4, %xmm5 +; SSE42-NEXT: psrld %xmm3, %xmm5 +; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm8[2,3,2,3] +; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm6[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm4, %xmm7 +; SSE42-NEXT: psrld %xmm3, %xmm7 +; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm4, %xmm3 ; SSE42-NEXT: psrld %xmm5, %xmm3 -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7] ; SSE42-NEXT: psrld %xmm5, %xmm4 ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm8[2,3],xmm3[4,5],xmm8[6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,3],xmm3[4,5],xmm7[6,7] ; SSE42-NEXT: retq ; ; AVX2-LABEL: lshr_v16i32_swap: @@ -4280,74 +4278,73 @@ define <16 x i32> @lshr_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x, ; ; SSE42-LABEL: lshr_v16i32_commute_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: movdqa %xmm3, %xmm10 -; SSE42-NEXT: movdqa %xmm2, %xmm9 -; SSE42-NEXT: movdqa %xmm1, %xmm8 -; SSE42-NEXT: movdqa %xmm0, %xmm3 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: movdqa %xmm3, %xmm8 +; SSE42-NEXT: movdqa %xmm2, %xmm10 +; SSE42-NEXT: movdqa %xmm1, %xmm9 +; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm3 +; SSE42-NEXT: psrad $31, %xmm3 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3 +; SSE42-NEXT: pshufd {{.*#+}} xmm1 = 
xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm2 ; SSE42-NEXT: psrad $31, %xmm2 ; SSE42-NEXT: pandn %xmm7, %xmm2 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm1 ; SSE42-NEXT: psrad $31, %xmm1 ; SSE42-NEXT: pandn %xmm6, %xmm1 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm0 ; SSE42-NEXT: psrad $31, %xmm0 ; SSE42-NEXT: pandn %xmm5, %xmm0 -; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm3 -; SSE42-NEXT: psrad $31, %xmm3 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3 -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,3,2,3] ; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm0, %xmm7 ; SSE42-NEXT: psrld %xmm6, %xmm7 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,3,3,3,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm0, %xmm11 ; SSE42-NEXT: psrld %xmm6, %xmm11 ; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm7[4,5,6,7] ; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm0, %xmm6 ; SSE42-NEXT: psrld %xmm5, %xmm6 -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7] ; SSE42-NEXT: psrld %xmm5, %xmm0 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7] ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3],xmm0[4,5],xmm11[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm10[2,3,2,3] ; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, %xmm7 ; SSE42-NEXT: psrld %xmm6, %xmm7 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm1, %xmm8 -; SSE42-NEXT: psrld %xmm6, %xmm8 -; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm7[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm1, %xmm9 +; SSE42-NEXT: psrld %xmm6, %xmm9 +; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4,5,6,7] ; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, %xmm6 ; SSE42-NEXT: psrld %xmm5, %xmm6 -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[0,1,1,1,4,5,6,7] ; SSE42-NEXT: psrld %xmm5, %xmm1 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm8[2,3],xmm1[4,5],xmm8[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm10[2,3,2,3] +; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm9[2,3],xmm1[4,5],xmm9[6,7] +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3] ; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = 
xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm2, %xmm7 ; SSE42-NEXT: psrld %xmm6, %xmm7 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm2, %xmm8 -; SSE42-NEXT: psrld %xmm6, %xmm8 -; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm7[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm2, %xmm9 +; SSE42-NEXT: psrld %xmm6, %xmm9 +; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4,5,6,7] ; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm2, %xmm6 ; SSE42-NEXT: psrld %xmm5, %xmm6 -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7] ; SSE42-NEXT: psrld %xmm5, %xmm2 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm8[2,3],xmm2[4,5],xmm8[6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3],xmm2[4,5],xmm9[6,7] ; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3] ; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm3, %xmm7 @@ -4929,85 +4926,85 @@ define <16 x i32> @ashr_v16i32_swap(<16 x i1> %b, <16 x i32> noundef %x, <16 x i ; ; SSE42-LABEL: ashr_v16i32_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm8 ; SSE42-NEXT: psrad $31, %xmm8 -; SSE42-NEXT: pandn %xmm7, %xmm8 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8 +; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm9 +; SSE42-NEXT: psrad $31, %xmm9 +; SSE42-NEXT: pandn %xmm7, %xmm9 ; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm7 ; SSE42-NEXT: psrad $31, %xmm7 ; SSE42-NEXT: pandn %xmm6, %xmm7 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm6 -; SSE42-NEXT: psrad $31, %xmm6 -; SSE42-NEXT: pandn %xmm5, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm5 -; SSE42-NEXT: psrad $31, %xmm5 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5 -; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm1, %xmm9 -; SSE42-NEXT: psrad %xmm0, %xmm9 -; SSE42-NEXT: pshufd {{.*#+}} xmm10 = xmm6[2,3,2,3] -; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[2,3,3,3,4,5,6,7] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm0 +; SSE42-NEXT: psrad $31, %xmm0 +; SSE42-NEXT: pandn %xmm5, %xmm0 +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm1, %xmm6 +; SSE42-NEXT: psrad %xmm5, %xmm6 +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3] +; SSE42-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, %xmm11 -; SSE42-NEXT: psrad %xmm0, %xmm11 -; 
SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm9[0,1,2,3],xmm11[4,5,6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,1,4,5,6,7] +; SSE42-NEXT: psrad %xmm10, %xmm11 +; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm6[0,1,2,3],xmm11[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, %xmm0 ; SSE42-NEXT: psrad %xmm6, %xmm0 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[0,1,1,1,4,5,6,7] -; SSE42-NEXT: psrad %xmm6, %xmm1 +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] +; SSE42-NEXT: psrad %xmm5, %xmm1 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3],xmm0[4,5],xmm11[6,7] ; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm2, %xmm6 -; SSE42-NEXT: psrad %xmm1, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm7[2,3,2,3] -; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm2, %xmm5 +; SSE42-NEXT: psrad %xmm1, %xmm5 +; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,3,2,3] +; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm2, %xmm10 ; SSE42-NEXT: psrad %xmm1, %xmm10 -; SSE42-NEXT: pblendw {{.*#+}} xmm10 = xmm6[0,1,2,3],xmm10[4,5,6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm10 = xmm5[0,1,2,3],xmm10[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm2, %xmm1 -; SSE42-NEXT: psrad %xmm6, %xmm1 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[0,1,1,1,4,5,6,7] -; SSE42-NEXT: psrad %xmm6, %xmm2 +; SSE42-NEXT: psrad %xmm5, %xmm1 +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7] +; SSE42-NEXT: psrad %xmm5, %xmm2 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm10[2,3],xmm1[4,5],xmm10[6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm3, %xmm6 -; SSE42-NEXT: psrad %xmm2, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,3,2,3] -; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm3, %xmm9 -; SSE42-NEXT: psrad %xmm2, %xmm9 -; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm6[0,1,2,3],xmm9[4,5,6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm9[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm3, %xmm5 +; SSE42-NEXT: psrad %xmm2, %xmm5 +; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm9[2,3,2,3] +; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm6[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm3, %xmm7 +; SSE42-NEXT: psrad %xmm2, %xmm7 +; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm3, %xmm2 -; SSE42-NEXT: psrad %xmm6, %xmm2 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,1,1,1,4,5,6,7] -; SSE42-NEXT: psrad %xmm6, %xmm3 +; SSE42-NEXT: psrad %xmm5, %xmm2 +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7] +; SSE42-NEXT: psrad %xmm5, %xmm3 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3],xmm2[4,5],xmm9[6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm4, %xmm6 -; SSE42-NEXT: psrad %xmm3, %xmm6 -; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3] -; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm4, %xmm8 -; SSE42-NEXT: psrad %xmm3, %xmm8 -; SSE42-NEXT: pblendw {{.*#+}} xmm8 
= xmm6[0,1,2,3],xmm8[4,5,6,7] -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,3],xmm2[4,5],xmm7[6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm4, %xmm5 +; SSE42-NEXT: psrad %xmm3, %xmm5 +; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm8[2,3,2,3] +; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm6[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm4, %xmm7 +; SSE42-NEXT: psrad %xmm3, %xmm7 +; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm4, %xmm3 ; SSE42-NEXT: psrad %xmm5, %xmm3 -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7] ; SSE42-NEXT: psrad %xmm5, %xmm4 ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm8[2,3],xmm3[4,5],xmm8[6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,3],xmm3[4,5],xmm7[6,7] ; SSE42-NEXT: retq ; ; AVX2-LABEL: ashr_v16i32_swap: @@ -5131,74 +5128,73 @@ define <16 x i32> @ashr_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x, ; ; SSE42-LABEL: ashr_v16i32_commute_swap: ; SSE42: # %bb.0: -; SSE42-NEXT: movdqa %xmm3, %xmm10 -; SSE42-NEXT: movdqa %xmm2, %xmm9 -; SSE42-NEXT: movdqa %xmm1, %xmm8 -; SSE42-NEXT: movdqa %xmm0, %xmm3 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: movdqa %xmm3, %xmm8 +; SSE42-NEXT: movdqa %xmm2, %xmm10 +; SSE42-NEXT: movdqa %xmm1, %xmm9 +; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero +; SSE42-NEXT: pslld $31, %xmm3 +; SSE42-NEXT: psrad $31, %xmm3 +; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3 +; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm2 ; SSE42-NEXT: psrad $31, %xmm2 ; SSE42-NEXT: pandn %xmm7, %xmm2 -; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm1 ; SSE42-NEXT: psrad $31, %xmm1 ; SSE42-NEXT: pandn %xmm6, %xmm1 -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero ; SSE42-NEXT: pslld $31, %xmm0 ; SSE42-NEXT: psrad $31, %xmm0 ; SSE42-NEXT: pandn %xmm5, %xmm0 -; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3] -; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero -; SSE42-NEXT: pslld $31, %xmm3 -; SSE42-NEXT: psrad $31, %xmm3 -; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3 -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,3,2,3] ; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = 
xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm0, %xmm7 ; SSE42-NEXT: psrad %xmm6, %xmm7 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,3,3,3,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm0, %xmm11 ; SSE42-NEXT: psrad %xmm6, %xmm11 ; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm7[4,5,6,7] ; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm0, %xmm6 ; SSE42-NEXT: psrad %xmm5, %xmm6 -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7] ; SSE42-NEXT: psrad %xmm5, %xmm0 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7] ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3],xmm0[4,5],xmm11[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,3,2,3] +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm10[2,3,2,3] ; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, %xmm7 ; SSE42-NEXT: psrad %xmm6, %xmm7 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm1, %xmm8 -; SSE42-NEXT: psrad %xmm6, %xmm8 -; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm7[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm1, %xmm9 +; SSE42-NEXT: psrad %xmm6, %xmm9 +; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4,5,6,7] ; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, %xmm6 ; SSE42-NEXT: psrad %xmm5, %xmm6 -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[0,1,1,1,4,5,6,7] ; SSE42-NEXT: psrad %xmm5, %xmm1 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm8[2,3],xmm1[4,5],xmm8[6,7] -; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm10[2,3,2,3] +; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm9[2,3],xmm1[4,5],xmm9[6,7] +; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3] ; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm2, %xmm7 ; SSE42-NEXT: psrad %xmm6, %xmm7 -; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[2,3,3,3,4,5,6,7] -; SSE42-NEXT: movdqa %xmm2, %xmm8 -; SSE42-NEXT: psrad %xmm6, %xmm8 -; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm7[4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,3,3,3,4,5,6,7] +; SSE42-NEXT: movdqa %xmm2, %xmm9 +; SSE42-NEXT: psrad %xmm6, %xmm9 +; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4,5,6,7] ; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7] ; SSE42-NEXT: movdqa %xmm2, %xmm6 ; SSE42-NEXT: psrad %xmm5, %xmm6 -; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[0,1,1,1,4,5,6,7] +; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7] ; SSE42-NEXT: psrad %xmm5, %xmm2 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7] -; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm8[2,3],xmm2[4,5],xmm8[6,7] +; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3],xmm2[4,5],xmm9[6,7] ; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3] ; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm3, %xmm7 diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll index 894186f..1ab1a1a 100644 --- a/llvm/test/CodeGen/X86/vector-compress.ll +++ b/llvm/test/CodeGen/X86/vector-compress.ll @@ -1094,26 +1094,25 @@ define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> ; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1 ; AVX2-NEXT: vmovaps %xmm2, -{{[0-9]+}}(%rsp) -; AVX2-NEXT: vpextrb $1, %xmm1, %r11d -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: movzbl %al, %edx -; AVX2-NEXT: # kill: def $al killed $al killed $eax +; AVX2-NEXT: vpextrb $1, %xmm1, %r13d +; AVX2-NEXT: vmovd %xmm1, %esi +; AVX2-NEXT: movl %esi, %eax ; AVX2-NEXT: andb $1, %al -; AVX2-NEXT: subb %r11b, %al -; AVX2-NEXT: vpextrb $2, %xmm1, %esi -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: vpextrb $3, %xmm1, %r13d ; AVX2-NEXT: subb %r13b, %al +; AVX2-NEXT: vpextrb $2, %xmm1, %edx +; AVX2-NEXT: subb %dl, %al +; AVX2-NEXT: vpextrb $3, %xmm1, %ebp +; AVX2-NEXT: subb %bpl, %al ; AVX2-NEXT: vpextrb $4, %xmm1, %r12d ; AVX2-NEXT: subb %r12b, %al ; AVX2-NEXT: vpextrb $5, %xmm1, %r15d ; AVX2-NEXT: subb %r15b, %al ; AVX2-NEXT: vpextrb $6, %xmm1, %r14d ; AVX2-NEXT: subb %r14b, %al -; AVX2-NEXT: vpextrb $7, %xmm1, %ebp -; AVX2-NEXT: subb %bpl, %al -; AVX2-NEXT: vpextrb $8, %xmm1, %ebx +; AVX2-NEXT: vpextrb $7, %xmm1, %ebx ; AVX2-NEXT: subb %bl, %al +; AVX2-NEXT: vpextrb $8, %xmm1, %r11d +; AVX2-NEXT: subb %r11b, %al ; AVX2-NEXT: vpextrb $9, %xmm1, %r10d ; AVX2-NEXT: subb %r10b, %al ; AVX2-NEXT: vpextrb $10, %xmm1, %r9d @@ -1123,108 +1122,94 @@ define <16 x i8> @test_compress_v16i8(<16 x i8> %vec, <16 x i1> %mask, <16 x i8> ; AVX2-NEXT: vpextrb $12, %xmm1, %edi ; AVX2-NEXT: subb %dil, %al ; AVX2-NEXT: vpextrb $13, %xmm1, %ecx -; AVX2-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: subb %cl, %al ; AVX2-NEXT: vpextrb $14, %xmm1, %ecx -; AVX2-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: subb %cl, %al ; AVX2-NEXT: vpextrb $15, %xmm1, %ecx -; AVX2-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; AVX2-NEXT: subb %cl, %al ; AVX2-NEXT: movzbl %al, %eax ; AVX2-NEXT: andl $15, %eax ; AVX2-NEXT: movzbl -40(%rsp,%rax), %eax ; AVX2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; AVX2-NEXT: vpextrb $0, %xmm0, -{{[0-9]+}}(%rsp) +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: vpextrb $1, %xmm0, -40(%rsp,%rsi) +; AVX2-NEXT: andl $1, %r13d +; AVX2-NEXT: addq %rsi, %r13 +; AVX2-NEXT: vpextrb $2, %xmm0, -40(%rsp,%r13) ; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: vpextrb $1, %xmm0, -40(%rsp,%rdx) -; AVX2-NEXT: movzbl %r11b, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rdx, %rax -; AVX2-NEXT: vpextrb $2, %xmm0, -40(%rsp,%rax) -; AVX2-NEXT: movzbl %sil, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rax, %rcx -; AVX2-NEXT: vpextrb $3, %xmm0, -40(%rsp,%rcx) -; AVX2-NEXT: movzbl %r13b, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rcx, %rax -; AVX2-NEXT: vpextrb $4, %xmm0, -40(%rsp,%rax) -; AVX2-NEXT: movzbl %r12b, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rax, %rcx -; AVX2-NEXT: movzbl %r15b, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rcx, %rax -; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx -; AVX2-NEXT: andl $15, %ecx -; AVX2-NEXT: vpextrb $5, %xmm0, -40(%rsp,%rcx) -; AVX2-NEXT: movzbl %r14b, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rax, %rcx -; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax -; AVX2-NEXT: andl $15, %eax -; AVX2-NEXT: vpextrb $6, %xmm0, -40(%rsp,%rax) -; AVX2-NEXT: movzbl %bpl, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rcx, %rax -; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx -; AVX2-NEXT: andl $15, %ecx -; AVX2-NEXT: vpextrb 
$7, %xmm0, -40(%rsp,%rcx) -; AVX2-NEXT: movzbl %bl, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rax, %rcx -; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax -; AVX2-NEXT: andl $15, %eax -; AVX2-NEXT: vpextrb $8, %xmm0, -40(%rsp,%rax) -; AVX2-NEXT: movzbl %r10b, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rcx, %rax -; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx -; AVX2-NEXT: andl $15, %ecx -; AVX2-NEXT: vpextrb $9, %xmm0, -40(%rsp,%rcx) -; AVX2-NEXT: movzbl %r9b, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rax, %rcx -; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax -; AVX2-NEXT: andl $15, %eax -; AVX2-NEXT: vpextrb $10, %xmm0, -40(%rsp,%rax) -; AVX2-NEXT: movzbl %r8b, %eax -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rcx, %rax -; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx -; AVX2-NEXT: andl $15, %ecx -; AVX2-NEXT: vpextrb $11, %xmm0, -40(%rsp,%rcx) -; AVX2-NEXT: movzbl %dil, %ecx -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rax, %rcx -; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax -; AVX2-NEXT: andl $15, %eax -; AVX2-NEXT: vpextrb $12, %xmm0, -40(%rsp,%rax) -; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload +; AVX2-NEXT: addq %r13, %rdx +; AVX2-NEXT: vpextrb $3, %xmm0, -40(%rsp,%rdx) +; AVX2-NEXT: andl $1, %ebp +; AVX2-NEXT: addq %rdx, %rbp +; AVX2-NEXT: vpextrb $4, %xmm0, -40(%rsp,%rbp) +; AVX2-NEXT: andl $1, %r12d +; AVX2-NEXT: addq %rbp, %r12 +; AVX2-NEXT: andl $1, %r15d +; AVX2-NEXT: addq %r12, %r15 +; AVX2-NEXT: # kill: def $r12d killed $r12d killed $r12 def $r12 +; AVX2-NEXT: andl $15, %r12d +; AVX2-NEXT: vpextrb $5, %xmm0, -40(%rsp,%r12) +; AVX2-NEXT: andl $1, %r14d +; AVX2-NEXT: addq %r15, %r14 +; AVX2-NEXT: # kill: def $r15d killed $r15d killed $r15 def $r15 +; AVX2-NEXT: andl $15, %r15d +; AVX2-NEXT: vpextrb $6, %xmm0, -40(%rsp,%r15) +; AVX2-NEXT: andl $1, %ebx +; AVX2-NEXT: addq %r14, %rbx +; AVX2-NEXT: # kill: def $r14d killed $r14d killed $r14 def $r14 +; AVX2-NEXT: andl $15, %r14d +; AVX2-NEXT: vpextrb $7, %xmm0, -40(%rsp,%r14) +; AVX2-NEXT: andl $1, %r11d +; AVX2-NEXT: addq %rbx, %r11 +; AVX2-NEXT: # kill: def $ebx killed $ebx killed $rbx def $rbx +; AVX2-NEXT: andl $15, %ebx +; AVX2-NEXT: vpextrb $8, %xmm0, -40(%rsp,%rbx) +; AVX2-NEXT: andl $1, %r10d +; AVX2-NEXT: addq %r11, %r10 +; AVX2-NEXT: # kill: def $r11d killed $r11d killed $r11 def $r11 +; AVX2-NEXT: andl $15, %r11d +; AVX2-NEXT: vpextrb $9, %xmm0, -40(%rsp,%r11) +; AVX2-NEXT: andl $1, %r9d +; AVX2-NEXT: addq %r10, %r9 +; AVX2-NEXT: # kill: def $r10d killed $r10d killed $r10 def $r10 +; AVX2-NEXT: andl $15, %r10d +; AVX2-NEXT: vpextrb $10, %xmm0, -40(%rsp,%r10) +; AVX2-NEXT: andl $1, %r8d +; AVX2-NEXT: addq %r9, %r8 +; AVX2-NEXT: # kill: def $r9d killed $r9d killed $r9 def $r9 +; AVX2-NEXT: andl $15, %r9d +; AVX2-NEXT: vpextrb $11, %xmm0, -40(%rsp,%r9) +; AVX2-NEXT: andl $1, %edi +; AVX2-NEXT: addq %r8, %rdi +; AVX2-NEXT: # kill: def $r8d killed $r8d killed $r8 def $r8 +; AVX2-NEXT: andl $15, %r8d +; AVX2-NEXT: vpextrb $12, %xmm0, -40(%rsp,%r8) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: addq %rdi, %rsi +; AVX2-NEXT: # kill: def $edi killed $edi killed $rdi def $rdi +; AVX2-NEXT: andl $15, %edi +; AVX2-NEXT: vpextrb $13, %xmm0, -40(%rsp,%rdi) +; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rcx, %rax -; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def 
$rcx -; AVX2-NEXT: andl $15, %ecx -; AVX2-NEXT: vpextrb $13, %xmm0, -40(%rsp,%rcx) -; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload +; AVX2-NEXT: addq %rsi, %rax +; AVX2-NEXT: # kill: def $esi killed $esi killed $rsi def $rsi +; AVX2-NEXT: andl $15, %esi +; AVX2-NEXT: vpextrb $14, %xmm0, -40(%rsp,%rsi) ; AVX2-NEXT: andl $1, %ecx ; AVX2-NEXT: addq %rax, %rcx ; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $15, %eax -; AVX2-NEXT: vpextrb $14, %xmm0, -40(%rsp,%rax) -; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rcx, %rax -; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx -; AVX2-NEXT: andl $15, %ecx -; AVX2-NEXT: vpextrb $15, %xmm0, -40(%rsp,%rcx) -; AVX2-NEXT: cmpq $15, %rax -; AVX2-NEXT: movl $15, %ecx -; AVX2-NEXT: cmovbq %rax, %rcx -; AVX2-NEXT: vpextrb $15, %xmm0, %eax -; AVX2-NEXT: cmovbel {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload -; AVX2-NEXT: movb %al, -40(%rsp,%rcx) +; AVX2-NEXT: vpextrb $15, %xmm0, -40(%rsp,%rax) +; AVX2-NEXT: cmpq $15, %rcx +; AVX2-NEXT: movl $15, %eax +; AVX2-NEXT: cmovbq %rcx, %rax +; AVX2-NEXT: vpextrb $15, %xmm0, %ecx +; AVX2-NEXT: cmovbel {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Folded Reload +; AVX2-NEXT: movb %cl, -40(%rsp,%rax) ; AVX2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 ; AVX2-NEXT: popq %rbx ; AVX2-NEXT: popq %r12 @@ -1805,140 +1790,137 @@ define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> ; AVX2-NEXT: pushq %r12 ; AVX2-NEXT: pushq %rbx ; AVX2-NEXT: andq $-32, %rsp -; AVX2-NEXT: subq $128, %rsp -; AVX2-NEXT: # kill: def $r9d killed $r9d def $r9 -; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: # kill: def $r8d killed $r8d def $r8 -; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movl %ecx, %r13d -; AVX2-NEXT: movl %edx, %r15d -; AVX2-NEXT: movl %esi, %ebx +; AVX2-NEXT: subq $96, %rsp +; AVX2-NEXT: movl %r9d, %r11d +; AVX2-NEXT: movl %r8d, %r10d +; AVX2-NEXT: movl %ecx, %r9d +; AVX2-NEXT: movl %edx, %r8d +; AVX2-NEXT: # kill: def $esi killed $esi def $rsi ; AVX2-NEXT: # kill: def $edi killed $edi def $rdi -; AVX2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX2-NEXT: movl 360(%rbp), %eax -; AVX2-NEXT: movl 352(%rbp), %ecx +; AVX2-NEXT: movzbl 360(%rbp), %eax +; AVX2-NEXT: movzbl 352(%rbp), %ecx ; AVX2-NEXT: vmovd %ecx, %xmm4 ; AVX2-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 368(%rbp), %eax +; AVX2-NEXT: movzbl 368(%rbp), %eax ; AVX2-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 376(%rbp), %eax +; AVX2-NEXT: movzbl 376(%rbp), %eax ; AVX2-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 384(%rbp), %eax +; AVX2-NEXT: movzbl 384(%rbp), %eax ; AVX2-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 392(%rbp), %eax +; AVX2-NEXT: movzbl 392(%rbp), %eax ; AVX2-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 400(%rbp), %eax +; AVX2-NEXT: movzbl 400(%rbp), %eax ; AVX2-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 408(%rbp), %eax +; AVX2-NEXT: movzbl 408(%rbp), %eax ; AVX2-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 416(%rbp), %eax +; AVX2-NEXT: movzbl 416(%rbp), %eax ; AVX2-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 424(%rbp), %eax +; AVX2-NEXT: movzbl 424(%rbp), %eax ; AVX2-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 432(%rbp), %eax +; AVX2-NEXT: movzbl 432(%rbp), %eax ; AVX2-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4 -; 
AVX2-NEXT: movl 440(%rbp), %eax +; AVX2-NEXT: movzbl 440(%rbp), %eax ; AVX2-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 448(%rbp), %eax +; AVX2-NEXT: movzbl 448(%rbp), %eax ; AVX2-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 456(%rbp), %eax +; AVX2-NEXT: movzbl 456(%rbp), %eax ; AVX2-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 464(%rbp), %eax +; AVX2-NEXT: movzbl 464(%rbp), %eax ; AVX2-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 472(%rbp), %eax +; AVX2-NEXT: movzbl 472(%rbp), %eax ; AVX2-NEXT: vpinsrb $15, %eax, %xmm4, %xmm4 -; AVX2-NEXT: movl 224(%rbp), %eax +; AVX2-NEXT: movzbl 224(%rbp), %eax ; AVX2-NEXT: vmovd %eax, %xmm5 -; AVX2-NEXT: movl 232(%rbp), %eax +; AVX2-NEXT: movzbl 232(%rbp), %eax ; AVX2-NEXT: vpinsrb $1, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 240(%rbp), %eax +; AVX2-NEXT: movzbl 240(%rbp), %eax ; AVX2-NEXT: vpinsrb $2, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 248(%rbp), %eax +; AVX2-NEXT: movzbl 248(%rbp), %eax ; AVX2-NEXT: vpinsrb $3, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 256(%rbp), %eax +; AVX2-NEXT: movzbl 256(%rbp), %eax ; AVX2-NEXT: vpinsrb $4, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 264(%rbp), %eax +; AVX2-NEXT: movzbl 264(%rbp), %eax ; AVX2-NEXT: vpinsrb $5, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 272(%rbp), %eax +; AVX2-NEXT: movzbl 272(%rbp), %eax ; AVX2-NEXT: vpinsrb $6, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 280(%rbp), %eax +; AVX2-NEXT: movzbl 280(%rbp), %eax ; AVX2-NEXT: vpinsrb $7, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 288(%rbp), %eax +; AVX2-NEXT: movzbl 288(%rbp), %eax ; AVX2-NEXT: vpinsrb $8, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 296(%rbp), %eax +; AVX2-NEXT: movzbl 296(%rbp), %eax ; AVX2-NEXT: vpinsrb $9, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 304(%rbp), %eax +; AVX2-NEXT: movzbl 304(%rbp), %eax ; AVX2-NEXT: vpinsrb $10, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 312(%rbp), %eax +; AVX2-NEXT: movzbl 312(%rbp), %eax ; AVX2-NEXT: vpinsrb $11, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 320(%rbp), %eax +; AVX2-NEXT: movzbl 320(%rbp), %eax ; AVX2-NEXT: vpinsrb $12, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 328(%rbp), %eax +; AVX2-NEXT: movzbl 328(%rbp), %eax ; AVX2-NEXT: vpinsrb $13, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 336(%rbp), %eax +; AVX2-NEXT: movzbl 336(%rbp), %eax ; AVX2-NEXT: vpinsrb $14, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 344(%rbp), %eax +; AVX2-NEXT: movzbl 344(%rbp), %eax ; AVX2-NEXT: vpinsrb $15, %eax, %xmm5, %xmm5 ; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4 -; AVX2-NEXT: movl 96(%rbp), %eax -; AVX2-NEXT: vmovd %eax, %xmm5 -; AVX2-NEXT: movl 104(%rbp), %eax -; AVX2-NEXT: vpinsrb $1, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 112(%rbp), %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 120(%rbp), %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 128(%rbp), %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 136(%rbp), %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 144(%rbp), %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 152(%rbp), %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 160(%rbp), %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 168(%rbp), %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 176(%rbp), %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 184(%rbp), %eax +; AVX2-NEXT: vmovd %edi, %xmm5 +; AVX2-NEXT: vpinsrb $1, %esi, %xmm5, %xmm5 +; AVX2-NEXT: vpinsrb $2, %edx, %xmm5, %xmm5 +; AVX2-NEXT: vpinsrb $3, %r9d, %xmm5, %xmm5 +; 
AVX2-NEXT: vpinsrb $4, %r10d, %xmm5, %xmm5 +; AVX2-NEXT: vpinsrb $5, %r11d, %xmm5, %xmm5 +; AVX2-NEXT: movzbl 16(%rbp), %ebx +; AVX2-NEXT: vpinsrb $6, %ebx, %xmm5, %xmm5 +; AVX2-NEXT: movzbl 24(%rbp), %r14d +; AVX2-NEXT: vpinsrb $7, %r14d, %xmm5, %xmm5 +; AVX2-NEXT: movzbl 32(%rbp), %r15d +; AVX2-NEXT: vpinsrb $8, %r15d, %xmm5, %xmm5 +; AVX2-NEXT: movzbl 40(%rbp), %r12d +; AVX2-NEXT: vpinsrb $9, %r12d, %xmm5, %xmm5 +; AVX2-NEXT: movzbl 48(%rbp), %r13d +; AVX2-NEXT: vpinsrb $10, %r13d, %xmm5, %xmm5 +; AVX2-NEXT: movzbl 56(%rbp), %eax ; AVX2-NEXT: vpinsrb $11, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 192(%rbp), %eax +; AVX2-NEXT: movzbl 64(%rbp), %eax ; AVX2-NEXT: vpinsrb $12, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 200(%rbp), %eax +; AVX2-NEXT: movzbl 72(%rbp), %eax ; AVX2-NEXT: vpinsrb $13, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 208(%rbp), %eax +; AVX2-NEXT: movzbl 80(%rbp), %eax ; AVX2-NEXT: vpinsrb $14, %eax, %xmm5, %xmm5 -; AVX2-NEXT: movl 216(%rbp), %eax +; AVX2-NEXT: movzbl 88(%rbp), %eax ; AVX2-NEXT: vpinsrb $15, %eax, %xmm5, %xmm5 -; AVX2-NEXT: vmovd %edi, %xmm6 -; AVX2-NEXT: vpinsrb $1, %esi, %xmm6, %xmm6 -; AVX2-NEXT: vpinsrb $2, %edx, %xmm6, %xmm6 -; AVX2-NEXT: vpinsrb $3, %r13d, %xmm6, %xmm6 -; AVX2-NEXT: vpinsrb $4, %r8d, %xmm6, %xmm6 -; AVX2-NEXT: vpinsrb $5, %r9d, %xmm6, %xmm6 -; AVX2-NEXT: movl 16(%rbp), %esi -; AVX2-NEXT: vpinsrb $6, %esi, %xmm6, %xmm6 -; AVX2-NEXT: movl 24(%rbp), %edi -; AVX2-NEXT: vpinsrb $7, %edi, %xmm6, %xmm6 -; AVX2-NEXT: movl 32(%rbp), %r8d -; AVX2-NEXT: vpinsrb $8, %r8d, %xmm6, %xmm6 -; AVX2-NEXT: movl 40(%rbp), %r9d -; AVX2-NEXT: vpinsrb $9, %r9d, %xmm6, %xmm6 -; AVX2-NEXT: movl 48(%rbp), %r10d -; AVX2-NEXT: vpinsrb $10, %r10d, %xmm6, %xmm6 -; AVX2-NEXT: movl 56(%rbp), %r11d -; AVX2-NEXT: vpinsrb $11, %r11d, %xmm6, %xmm6 -; AVX2-NEXT: movl 64(%rbp), %r14d -; AVX2-NEXT: vpinsrb $12, %r14d, %xmm6, %xmm6 -; AVX2-NEXT: movl 72(%rbp), %r12d -; AVX2-NEXT: vpinsrb $13, %r12d, %xmm6, %xmm6 -; AVX2-NEXT: movl 80(%rbp), %eax +; AVX2-NEXT: movzbl 96(%rbp), %eax +; AVX2-NEXT: vmovd %eax, %xmm6 +; AVX2-NEXT: movzbl 104(%rbp), %eax +; AVX2-NEXT: vpinsrb $1, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 112(%rbp), %eax +; AVX2-NEXT: vpinsrb $2, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 120(%rbp), %eax +; AVX2-NEXT: vpinsrb $3, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 128(%rbp), %eax +; AVX2-NEXT: vpinsrb $4, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 136(%rbp), %eax +; AVX2-NEXT: vpinsrb $5, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 144(%rbp), %eax +; AVX2-NEXT: vpinsrb $6, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 152(%rbp), %eax +; AVX2-NEXT: vpinsrb $7, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 160(%rbp), %eax +; AVX2-NEXT: vpinsrb $8, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 168(%rbp), %eax +; AVX2-NEXT: vpinsrb $9, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 176(%rbp), %eax +; AVX2-NEXT: vpinsrb $10, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 184(%rbp), %eax +; AVX2-NEXT: vpinsrb $11, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 192(%rbp), %eax +; AVX2-NEXT: vpinsrb $12, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 200(%rbp), %eax +; AVX2-NEXT: vpinsrb $13, %eax, %xmm6, %xmm6 +; AVX2-NEXT: movzbl 208(%rbp), %eax ; AVX2-NEXT: vpinsrb $14, %eax, %xmm6, %xmm6 -; AVX2-NEXT: movl 88(%rbp), %eax +; AVX2-NEXT: movzbl 216(%rbp), %eax ; AVX2-NEXT: vpinsrb $15, %eax, %xmm6, %xmm6 -; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm6, %ymm5 +; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm5 ; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; AVX2-NEXT: vpand %ymm6, 
%ymm5, %ymm5 ; AVX2-NEXT: vpand %ymm6, %ymm4, %ymm4 @@ -1980,379 +1962,435 @@ define <64 x i8> @test_compress_v64i8(<64 x i8> %vec, <64 x i1> %mask, <64 x i8> ; AVX2-NEXT: vmovaps %ymm2, (%rsp) ; AVX2-NEXT: movzbl %al, %eax ; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: movzbl (%rsp,%rax), %eax -; AVX2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVX2-NEXT: movzbl (%rsp,%rax), %edx ; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp) -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rax) -; AVX2-NEXT: andl $1, %ebx -; AVX2-NEXT: addq %rax, %rbx -; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rbx) -; AVX2-NEXT: andl $1, %r15d -; AVX2-NEXT: addq %rbx, %r15 -; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%r15) -; AVX2-NEXT: andl $1, %r13d -; AVX2-NEXT: addq %r15, %r13 -; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%r13) -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload -; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %r13, %rcx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %rcx, %rax -; AVX2-NEXT: andl $1, %esi -; AVX2-NEXT: addq %rax, %rsi -; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rax) ; AVX2-NEXT: andl $1, %edi -; AVX2-NEXT: addq %rsi, %rdi -; AVX2-NEXT: # kill: def $esi killed $esi killed $rsi def $rsi -; AVX2-NEXT: andl $63, %esi -; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rsi) +; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rdi) +; AVX2-NEXT: andl $1, %esi +; AVX2-NEXT: addq %rdi, %rsi +; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rsi) ; AVX2-NEXT: andl $1, %r8d -; AVX2-NEXT: addq %rdi, %r8 -; AVX2-NEXT: # kill: def $edi killed $edi killed $rdi def $rdi -; AVX2-NEXT: andl $63, %edi -; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rdi) +; AVX2-NEXT: addq %rsi, %r8 +; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%r8) ; AVX2-NEXT: andl $1, %r9d ; AVX2-NEXT: addq %r8, %r9 -; AVX2-NEXT: # kill: def $r8d killed $r8d killed $r8 def $r8 -; AVX2-NEXT: andl $63, %r8d -; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%r8) +; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%r9) ; AVX2-NEXT: andl $1, %r10d ; AVX2-NEXT: addq %r9, %r10 -; AVX2-NEXT: # kill: def $r9d killed $r9d killed $r9 def $r9 -; AVX2-NEXT: andl $63, %r9d -; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%r9) +; AVX2-NEXT: movl %r10d, %eax +; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rax) ; AVX2-NEXT: andl $1, %r11d ; AVX2-NEXT: addq %r10, %r11 -; AVX2-NEXT: # kill: def $r10d killed $r10d killed $r10 def $r10 -; AVX2-NEXT: andl $63, %r10d -; AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%r10) -; AVX2-NEXT: andl $1, %r14d -; AVX2-NEXT: addq %r11, %r14 +; AVX2-NEXT: movzbl %bl, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %r11, %rax ; AVX2-NEXT: # kill: def $r11d killed $r11d killed $r11 def $r11 ; AVX2-NEXT: andl $63, %r11d -; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%r11) -; AVX2-NEXT: andl $1, %r12d -; AVX2-NEXT: addq %r14, %r12 -; AVX2-NEXT: # kill: def $r14d killed $r14d killed $r14 def $r14 -; AVX2-NEXT: andl $63, %r14d -; AVX2-NEXT: vpextrb $13, %xmm0, (%rsp,%r14) -; AVX2-NEXT: movl 80(%rbp), %eax +; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%r11) +; AVX2-NEXT: movzbl %r14b, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movzbl %r15b, %eax ; 
AVX2-NEXT: andl $1, %eax -; AVX2-NEXT: addq %r12, %rax -; AVX2-NEXT: # kill: def $r12d killed $r12d killed $r12 def $r12 -; AVX2-NEXT: andl $63, %r12d -; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%r12) -; AVX2-NEXT: movl 88(%rbp), %ecx +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl %r12b, %ecx ; AVX2-NEXT: andl $1, %ecx ; AVX2-NEXT: addq %rax, %rcx ; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 96(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax +; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movzbl %r13b, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 56(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movzbl 64(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 72(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $13, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movzbl 80(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 88(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax +; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rax) +; AVX2-NEXT: movzbl 96(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 104(%rbp), %ecx +; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 104(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 112(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 120(%rbp), %ecx +; AVX2-NEXT: movzbl 112(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rcx) +; 
AVX2-NEXT: movzbl 120(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 128(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 136(%rbp), %ecx +; AVX2-NEXT: movzbl 128(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 136(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 144(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 152(%rbp), %ecx +; AVX2-NEXT: movzbl 144(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 152(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 160(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 168(%rbp), %ecx +; AVX2-NEXT: movzbl 160(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 168(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 176(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 184(%rbp), %ecx +; AVX2-NEXT: movzbl 176(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 184(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; 
AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 192(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 200(%rbp), %ecx +; AVX2-NEXT: movzbl 192(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 200(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $13, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 208(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 216(%rbp), %ecx +; AVX2-NEXT: movzbl 208(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 216(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 224(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $0, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 232(%rbp), %ecx +; AVX2-NEXT: movzbl 224(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $0, %xmm1, (%rsp,%rcx) +; AVX2-NEXT: movzbl 232(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $1, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 240(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $2, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 248(%rbp), %ecx +; AVX2-NEXT: movzbl 240(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $2, %xmm1, (%rsp,%rcx) +; AVX2-NEXT: movzbl 248(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $3, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 256(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $4, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 264(%rbp), %ecx +; 
AVX2-NEXT: movzbl 256(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $4, %xmm1, (%rsp,%rcx) +; AVX2-NEXT: movzbl 264(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $5, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 272(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $6, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 280(%rbp), %ecx +; AVX2-NEXT: movzbl 272(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $6, %xmm1, (%rsp,%rcx) +; AVX2-NEXT: movzbl 280(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $7, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 288(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $8, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 296(%rbp), %ecx +; AVX2-NEXT: movzbl 288(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $8, %xmm1, (%rsp,%rcx) +; AVX2-NEXT: movzbl 296(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $9, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 304(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $10, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 312(%rbp), %ecx +; AVX2-NEXT: movzbl 304(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $10, %xmm1, (%rsp,%rcx) +; AVX2-NEXT: movzbl 312(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $11, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 320(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $12, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 328(%rbp), %ecx +; AVX2-NEXT: movzbl 320(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $12, %xmm1, (%rsp,%rcx) +; AVX2-NEXT: movzbl 
328(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $13, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 336(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $14, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 344(%rbp), %ecx +; AVX2-NEXT: movzbl 336(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $14, %xmm1, (%rsp,%rcx) +; AVX2-NEXT: movzbl 344(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $15, %xmm1, (%rsp,%rax) -; AVX2-NEXT: movl 352(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax +; AVX2-NEXT: movzbl 352(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0 -; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 360(%rbp), %ecx +; AVX2-NEXT: vpextrb $0, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 360(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $1, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 368(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 376(%rbp), %ecx +; AVX2-NEXT: movzbl 368(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $2, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 376(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $3, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 384(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 392(%rbp), %ecx +; AVX2-NEXT: movzbl 384(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $4, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 392(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; 
AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $5, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 400(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 408(%rbp), %ecx +; AVX2-NEXT: movzbl 400(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $6, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 408(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $7, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 416(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 424(%rbp), %ecx +; AVX2-NEXT: movzbl 416(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $8, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 424(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $9, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 432(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 440(%rbp), %ecx +; AVX2-NEXT: movzbl 432(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $10, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 440(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $11, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 448(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 456(%rbp), %ecx +; AVX2-NEXT: movzbl 448(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $12, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 456(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $13, %xmm0, (%rsp,%rax) -; AVX2-NEXT: movl 464(%rbp), %edx -; AVX2-NEXT: andl $1, %edx -; AVX2-NEXT: addq %rcx, %rdx -; AVX2-NEXT: movl %ecx, %eax -; AVX2-NEXT: andl $63, %eax -; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rax) -; 
AVX2-NEXT: movl 472(%rbp), %ecx +; AVX2-NEXT: movzbl 464(%rbp), %eax +; AVX2-NEXT: movzbl %al, %eax +; AVX2-NEXT: andl $1, %eax +; AVX2-NEXT: addq %rcx, %rax +; AVX2-NEXT: # kill: def $ecx killed $ecx killed $rcx def $rcx +; AVX2-NEXT: andl $63, %ecx +; AVX2-NEXT: vpextrb $14, %xmm0, (%rsp,%rcx) +; AVX2-NEXT: movzbl 472(%rbp), %ecx +; AVX2-NEXT: movzbl %cl, %ecx ; AVX2-NEXT: andl $1, %ecx -; AVX2-NEXT: addq %rdx, %rcx -; AVX2-NEXT: movl %edx, %eax +; AVX2-NEXT: addq %rax, %rcx +; AVX2-NEXT: # kill: def $eax killed $eax killed $rax def $rax ; AVX2-NEXT: andl $63, %eax ; AVX2-NEXT: vpextrb $15, %xmm0, (%rsp,%rax) ; AVX2-NEXT: vpextrb $15, %xmm0, %eax ; AVX2-NEXT: cmpq $64, %rcx -; AVX2-NEXT: cmovbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload +; AVX2-NEXT: cmovbl %edx, %eax ; AVX2-NEXT: cmpq $63, %rcx -; AVX2-NEXT: movq %rcx, %rdx -; AVX2-NEXT: movl $63, %ecx -; AVX2-NEXT: cmovbq %rdx, %rcx -; AVX2-NEXT: movb %al, (%rsp,%rcx) +; AVX2-NEXT: movl $63, %edx +; AVX2-NEXT: cmovbq %rcx, %rdx +; AVX2-NEXT: movb %al, (%rsp,%rdx) ; AVX2-NEXT: vmovaps (%rsp), %ymm0 ; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %ymm1 ; AVX2-NEXT: leaq -40(%rbp), %rsp diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll index e60b565..d0690bd 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll @@ -509,10 +509,10 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind { ; SSE2-NEXT: pandn %xmm3, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: psrlw $7, %xmm3 -; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 +; SSE2-NEXT: paddb %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: paddb %xmm2, %xmm4 +; SSE2-NEXT: psrlw $7, %xmm4 +; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 ; SSE2-NEXT: por %xmm3, %xmm4 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pcmpgtb %xmm1, %xmm0 @@ -545,10 +545,10 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind { ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 -; SSE41-NEXT: psrlw $7, %xmm0 -; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: paddb %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddb %xmm1, %xmm3 +; SSE41-NEXT: psrlw $7, %xmm3 +; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 ; SSE41-NEXT: por %xmm0, %xmm3 ; SSE41-NEXT: paddb %xmm2, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 @@ -572,10 +572,10 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind { ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpsrlw $7, %xmm0, %xmm2 -; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm3 -; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3 +; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX-NEXT: retq @@ -704,10 +704,10 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind { ; X86-SSE2-NEXT: pandn %xmm3, %xmm2 ; X86-SSE2-NEXT: por %xmm4, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3 -; X86-SSE2-NEXT: psrlw $7, %xmm3 -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm3 +; X86-SSE2-NEXT: paddb 
%xmm2, %xmm3 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4 -; X86-SSE2-NEXT: paddb %xmm2, %xmm4 +; X86-SSE2-NEXT: psrlw $7, %xmm4 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm4 ; X86-SSE2-NEXT: por %xmm3, %xmm4 ; X86-SSE2-NEXT: paddb %xmm1, %xmm1 ; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll index 11a02f8..421fa98 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll @@ -431,10 +431,10 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind { ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm2 -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm3 -; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 +; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: retq @@ -451,10 +451,10 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind { ; AVX512F-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (m32bcst & (zmm3 ^ zmm2)) ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 -; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2 -; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 -; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm3 -; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3 +; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 +; AVX512F-NEXT: vpor %ymm3, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll index d979997..4969cb5 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll @@ -533,10 +533,10 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind { ; SSE2-NEXT: pandn %xmm1, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm1 -; SSE2-NEXT: psrlw $7, %xmm1 -; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: paddb %xmm2, %xmm1 ; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: paddb %xmm2, %xmm4 +; SSE2-NEXT: psrlw $7, %xmm4 +; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 ; SSE2-NEXT: por %xmm1, %xmm4 ; SSE2-NEXT: paddb %xmm3, %xmm3 ; SSE2-NEXT: pcmpgtb %xmm3, %xmm0 @@ -568,10 +568,10 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind { ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm1 -; SSE41-NEXT: psrlw $7, %xmm1 -; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE41-NEXT: paddb %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: paddb %xmm2, %xmm3 +; SSE41-NEXT: psrlw $7, %xmm3 +; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 ; SSE41-NEXT: por %xmm1, %xmm3 ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 @@ -596,10 +596,10 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind { ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpaddb 
%xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpsrlw $7, %xmm0, %xmm2 -; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm3 -; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3 +; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX-NEXT: retq @@ -731,10 +731,10 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind { ; X86-SSE2-NEXT: pandn %xmm1, %xmm2 ; X86-SSE2-NEXT: por %xmm4, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm1 -; X86-SSE2-NEXT: psrlw $7, %xmm1 -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: paddb %xmm2, %xmm1 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4 -; X86-SSE2-NEXT: paddb %xmm2, %xmm4 +; X86-SSE2-NEXT: psrlw $7, %xmm4 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm4 ; X86-SSE2-NEXT: por %xmm1, %xmm4 ; X86-SSE2-NEXT: paddb %xmm3, %xmm3 ; X86-SSE2-NEXT: pcmpgtb %xmm3, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll index 15e09c3..e2a3e26 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll @@ -457,10 +457,10 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind { ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm2 -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm3 -; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 +; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll index 6c79be7..93f4ce7 100644 --- a/llvm/test/CodeGen/X86/vector-rotate-128.ll +++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll @@ -442,10 +442,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; SSE2-NEXT: pandn %xmm3, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: psrlw $7, %xmm3 -; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 +; SSE2-NEXT: paddb %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: paddb %xmm2, %xmm4 +; SSE2-NEXT: psrlw $7, %xmm4 +; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 ; SSE2-NEXT: por %xmm3, %xmm4 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pcmpgtb %xmm1, %xmm0 @@ -478,10 +478,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 -; SSE41-NEXT: psrlw $7, %xmm0 -; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE41-NEXT: paddb %xmm1, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: paddb %xmm1, %xmm3 +; SSE41-NEXT: psrlw $7, %xmm3 +; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 ; SSE41-NEXT: por %xmm0, %xmm3 ; SSE41-NEXT: paddb %xmm2, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 @@ -505,10 +505,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; AVX-NEXT: 
vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpsrlw $7, %xmm0, %xmm2 -; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm3 -; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3 +; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX-NEXT: retq @@ -637,10 +637,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; X86-SSE2-NEXT: pandn %xmm3, %xmm2 ; X86-SSE2-NEXT: por %xmm4, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3 -; X86-SSE2-NEXT: psrlw $7, %xmm3 -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm3 +; X86-SSE2-NEXT: paddb %xmm2, %xmm3 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4 -; X86-SSE2-NEXT: paddb %xmm2, %xmm4 +; X86-SSE2-NEXT: psrlw $7, %xmm4 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm4 ; X86-SSE2-NEXT: por %xmm3, %xmm4 ; X86-SSE2-NEXT: paddb %xmm1, %xmm1 ; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll index 684721f..64c3118 100644 --- a/llvm/test/CodeGen/X86/vector-rotate-256.ll +++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll @@ -375,10 +375,10 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm2 -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm3 -; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 +; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: retq @@ -395,10 +395,10 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512F-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (m32bcst & (zmm3 ^ zmm2)) ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 -; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2 -; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 -; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm3 -; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3 +; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 +; AVX512F-NEXT: vpor %ymm3, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: retq diff --git a/llvm/test/CodeGen/X86/xray-custom-log.ll b/llvm/test/CodeGen/X86/xray-custom-log.ll index 8f23055..f4cdc23 100644 --- a/llvm/test/CodeGen/X86/xray-custom-log.ll +++ b/llvm/test/CodeGen/X86/xray-custom-log.ll @@ -1,9 +1,6 @@ ; RUN: llc -mtriple=x86_64 < %s | FileCheck %s ; RUN: llc -mtriple=x86_64 -relocation-model=pic < %s | FileCheck %s --check-prefix=PIC -; RUN: llc -mtriple=x86_64 -filetype=obj %s -o %t -; RUN: llvm-dwarfdump %t | FileCheck %s --check-prefix=DBG - define i32 @customevent() nounwind "function-instrument"="xray-always" !dbg !1 { %eventptr = alloca i8 %eventsize = alloca i64 @@ -93,17 +90,6 @@ define void @leaf_func() 
"function-instrument"="xray-always" "frame-pointer"="no declare void @llvm.xray.customevent(ptr, i64) declare void @llvm.xray.typedevent(i64, ptr, i64) -;; Construct call site entries for PATCHABLE_EVENT_CALL. -; DBG: DW_TAG_subprogram -; DBG: DW_TAG_call_site -; DBG-NEXT: DW_AT_call_target (DW_OP_reg{{.*}}) -; DBG-NEXT: DW_AT_call_return_pc - -; DBG: DW_TAG_subprogram -; DBG: DW_TAG_call_site -; DBG-NEXT: DW_AT_call_target (DW_OP_reg{{.*}}) -; DBG-NEXT: DW_AT_call_return_pc - !llvm.dbg.cu = !{!7} !llvm.module.flags = !{!10, !11} diff --git a/llvm/test/CodeGen/XCore/llvm.sincos.ll b/llvm/test/CodeGen/XCore/llvm.sincos.ll index 690c038..e01f208 100644 --- a/llvm/test/CodeGen/XCore/llvm.sincos.ll +++ b/llvm/test/CodeGen/XCore/llvm.sincos.ll @@ -26,9 +26,8 @@ define { <2 x half>, <2 x half> } @test_sincos_v2f16(<2 x half> %a) nounwind { } ; CHECK-LABEL: test_sincos_f32: -; OTHER: bl sinf -; OTHER: bl cosf -; GNU: bl sincosf +; CHECK: bl sinf +; CHECK: bl cosf define { float, float } @test_sincos_f32(float %a) nounwind { %result = call { float, float } @llvm.sincos.f32(float %a) ret { float, float } %result diff --git a/llvm/test/CodeGen/Xtensa/atomic-load-store.ll b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll new file mode 100644 index 0000000..bd843a3 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll @@ -0,0 +1,498 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @atomic_load_i8_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a unordered, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a monotonic, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a acquire, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 
5 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a seq_cst, align 1 + ret i8 %1 +} + +define i16 @atomic_load_i16_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a unordered, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a monotonic, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a acquire, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a seq_cst, align 2 + ret i16 %1 +} + +define i32 @atomic_load_i32_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a unordered, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; 
XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a monotonic, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a acquire, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a seq_cst, align 4 + ret i32 %1 +} + +define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a unordered, align 1 + ret void +} + +define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a monotonic, align 1 + ret void +} + +define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a release, align 1 + ret void +} + +define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; 
XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a seq_cst, align 1 + ret void +} + +define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a unordered, align 2 + ret void +} + +define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a monotonic, align 2 + ret void +} + +define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a release, align 2 + ret void +} + +define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a seq_cst, align 2 + ret void +} + +define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a unordered, align 4 + ret void +} + +define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; 
XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a monotonic, align 4 + ret void +} + +define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a release, align 4 + ret void +} + +define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a seq_cst, align 4 + ret void +} diff --git a/llvm/test/CodeGen/Xtensa/atomic-rmw.ll b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll new file mode 100644 index 0000000..81cb2dd --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll @@ -0,0 +1,10298 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB0_2 +; XTENSA-ATOMIC-NEXT: .LBB0_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB0_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB0_4 +; XTENSA-ATOMIC-NEXT: .LBB0_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; 
XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB0_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB0_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB0_1 +; XTENSA-ATOMIC-NEXT: .LBB0_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB1_2 +; XTENSA-ATOMIC-NEXT: .LBB1_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB1_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB1_4 +; XTENSA-ATOMIC-NEXT: .LBB1_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB1_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB1_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB1_1 +; XTENSA-ATOMIC-NEXT: .LBB1_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; 
XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB2_2 +; XTENSA-ATOMIC-NEXT: .LBB2_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB2_4 +; XTENSA-ATOMIC-NEXT: .LBB2_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB2_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB2_1 +; XTENSA-ATOMIC-NEXT: .LBB2_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB3_2 +; XTENSA-ATOMIC-NEXT: .LBB3_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB3_4 +; XTENSA-ATOMIC-NEXT: .LBB3_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB3_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB3_1 +; XTENSA-ATOMIC-NEXT: .LBB3_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, 
a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB4_2 +; XTENSA-ATOMIC-NEXT: .LBB4_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB4_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB4_4 +; XTENSA-ATOMIC-NEXT: .LBB4_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB4_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB4_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB4_1 +; XTENSA-ATOMIC-NEXT: .LBB4_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB5_2 +; XTENSA-ATOMIC-NEXT: .LBB5_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB5_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB5_4 +; XTENSA-ATOMIC-NEXT: .LBB5_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB5_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB5_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB5_1 +; XTENSA-ATOMIC-NEXT: .LBB5_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB6_2 +; XTENSA-ATOMIC-NEXT: .LBB6_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB6_4 +; XTENSA-ATOMIC-NEXT: .LBB6_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB6_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB6_1 +; XTENSA-ATOMIC-NEXT: .LBB6_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB7_2 +; 
XTENSA-ATOMIC-NEXT: .LBB7_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB7_4 +; XTENSA-ATOMIC-NEXT: .LBB7_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB7_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB7_1 +; XTENSA-ATOMIC-NEXT: .LBB7_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB8_2 +; XTENSA-ATOMIC-NEXT: .LBB8_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB8_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB8_4 +; XTENSA-ATOMIC-NEXT: .LBB8_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB8_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB8_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB8_1 +; XTENSA-ATOMIC-NEXT: .LBB8_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_seq_cst: 
+; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB9_2 +; XTENSA-ATOMIC-NEXT: .LBB9_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB9_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB9_4 +; XTENSA-ATOMIC-NEXT: .LBB9_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB9_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB9_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB9_1 +; XTENSA-ATOMIC-NEXT: .LBB9_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB10_2 +; XTENSA-ATOMIC-NEXT: .LBB10_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB10_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB10_4 +; XTENSA-ATOMIC-NEXT: .LBB10_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB10_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB10_2 
Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB10_1 +; XTENSA-ATOMIC-NEXT: .LBB10_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB11_2 +; XTENSA-ATOMIC-NEXT: .LBB11_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB11_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB11_4 +; XTENSA-ATOMIC-NEXT: .LBB11_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB11_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB11_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB11_1 +; XTENSA-ATOMIC-NEXT: .LBB11_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB12_2 +; 
XTENSA-ATOMIC-NEXT: .LBB12_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB12_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB12_4 +; XTENSA-ATOMIC-NEXT: .LBB12_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB12_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB12_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB12_1 +; XTENSA-ATOMIC-NEXT: .LBB12_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB13_2 +; XTENSA-ATOMIC-NEXT: .LBB13_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB13_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB13_4 +; XTENSA-ATOMIC-NEXT: .LBB13_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB13_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB13_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB13_1 +; XTENSA-ATOMIC-NEXT: .LBB13_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_sub_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB14_2 +; XTENSA-ATOMIC-NEXT: .LBB14_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB14_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB14_4 +; XTENSA-ATOMIC-NEXT: .LBB14_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB14_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB14_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB14_1 +; XTENSA-ATOMIC-NEXT: .LBB14_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB15_2 +; XTENSA-ATOMIC-NEXT: .LBB15_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB15_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB15_4 +; XTENSA-ATOMIC-NEXT: .LBB15_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB15_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB15_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, 
a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB15_1 +; XTENSA-ATOMIC-NEXT: .LBB15_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB16_2 +; XTENSA-ATOMIC-NEXT: .LBB16_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB16_4 +; XTENSA-ATOMIC-NEXT: .LBB16_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB16_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB16_1 +; XTENSA-ATOMIC-NEXT: .LBB16_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB17_2 +; XTENSA-ATOMIC-NEXT: .LBB17_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB17_4 +; XTENSA-ATOMIC-NEXT: .LBB17_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB17_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB17_1 +; XTENSA-ATOMIC-NEXT: .LBB17_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB18_2 +; XTENSA-ATOMIC-NEXT: .LBB18_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB18_4 +; XTENSA-ATOMIC-NEXT: .LBB18_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB18_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB18_1 +; XTENSA-ATOMIC-NEXT: .LBB18_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, 
a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB19_2 +; XTENSA-ATOMIC-NEXT: .LBB19_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB19_4 +; XTENSA-ATOMIC-NEXT: .LBB19_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB19_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB19_1 +; XTENSA-ATOMIC-NEXT: .LBB19_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB20_2 +; XTENSA-ATOMIC-NEXT: .LBB20_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB20_4 +; XTENSA-ATOMIC-NEXT: .LBB20_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB20_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB20_1 +; XTENSA-ATOMIC-NEXT: .LBB20_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b monotonic 
+ ret i8 %res +} + +define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB21_2 +; XTENSA-ATOMIC-NEXT: .LBB21_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB21_4 +; XTENSA-ATOMIC-NEXT: .LBB21_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB21_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB21_1 +; XTENSA-ATOMIC-NEXT: .LBB21_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB22_2 +; XTENSA-ATOMIC-NEXT: .LBB22_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB22_4 +; XTENSA-ATOMIC-NEXT: .LBB22_2: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB22_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB22_1 +; XTENSA-ATOMIC-NEXT: .LBB22_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB23_2 +; XTENSA-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB23_4 +; XTENSA-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB23_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB23_1 +; XTENSA-ATOMIC-NEXT: .LBB23_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI24_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; 
XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB24_2 +; XTENSA-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB24_4 +; XTENSA-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB24_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB24_1 +; XTENSA-ATOMIC-NEXT: .LBB24_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI25_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB25_2 +; XTENSA-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB25_4 +; XTENSA-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB25_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB25_1 +; XTENSA-ATOMIC-NEXT: .LBB25_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind { +; 
XTENSA-LABEL: atomicrmw_or_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI26_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB26_2 +; XTENSA-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB26_4 +; XTENSA-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB26_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB26_1 +; XTENSA-ATOMIC-NEXT: .LBB26_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI27_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB27_2 +; XTENSA-ATOMIC-NEXT: .LBB27_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB27_4 +; XTENSA-ATOMIC-NEXT: .LBB27_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB27_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB27_1 +; XTENSA-ATOMIC-NEXT: .LBB27_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; 
XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI28_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB28_2 +; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB28_4 +; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB28_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB28_1 +; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI29_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB29_2 +; XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB29_4 +; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB29_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # 
%atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB29_1 +; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI30_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB30_2 +; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB30_4 +; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB30_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB30_1 +; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI31_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB31_2 +; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB31_4 +; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr 
a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB31_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB31_1 +; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI32_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB32_2 +; XTENSA-ATOMIC-NEXT: .LBB32_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB32_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB32_4 +; XTENSA-ATOMIC-NEXT: .LBB32_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB32_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB32_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB32_1 +; XTENSA-ATOMIC-NEXT: .LBB32_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI33_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB33_2 +; XTENSA-ATOMIC-NEXT: .LBB33_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB33_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, 
a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB33_4 +; XTENSA-ATOMIC-NEXT: .LBB33_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB33_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB33_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB33_1 +; XTENSA-ATOMIC-NEXT: .LBB33_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI34_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB34_2 +; XTENSA-ATOMIC-NEXT: .LBB34_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB34_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB34_4 +; XTENSA-ATOMIC-NEXT: .LBB34_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB34_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB34_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB34_1 +; XTENSA-ATOMIC-NEXT: .LBB34_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI35_0 +; XTENSA-NEXT: j .LBB35_2 +; XTENSA-NEXT: .LBB35_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB35_4 +; XTENSA-NEXT: .LBB35_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai 
a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB35_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB35_1 +; XTENSA-NEXT: .LBB35_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB35_2 +; XTENSA-ATOMIC-NEXT: .LBB35_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB35_6 +; XTENSA-ATOMIC-NEXT: .LBB35_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB35_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB35_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB35_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB35_1 +; XTENSA-ATOMIC-NEXT: .LBB35_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI36_0 +; XTENSA-NEXT: j .LBB36_2 +; XTENSA-NEXT: .LBB36_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB36_4 +; XTENSA-NEXT: .LBB36_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB36_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB36_2 Depth=1 +; 
XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB36_1 +; XTENSA-NEXT: .LBB36_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB36_2 +; XTENSA-ATOMIC-NEXT: .LBB36_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB36_6 +; XTENSA-ATOMIC-NEXT: .LBB36_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB36_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB36_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB36_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB36_1 +; XTENSA-ATOMIC-NEXT: .LBB36_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI37_0 +; XTENSA-NEXT: j .LBB37_2 +; XTENSA-NEXT: .LBB37_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB37_4 +; XTENSA-NEXT: .LBB37_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB37_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-NEXT: or a12, 
a2, a2 +; XTENSA-NEXT: j .LBB37_1 +; XTENSA-NEXT: .LBB37_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB37_2 +; XTENSA-ATOMIC-NEXT: .LBB37_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB37_6 +; XTENSA-ATOMIC-NEXT: .LBB37_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB37_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB37_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB37_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB37_1 +; XTENSA-ATOMIC-NEXT: .LBB37_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI38_0 +; XTENSA-NEXT: j .LBB38_2 +; XTENSA-NEXT: .LBB38_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB38_4 +; XTENSA-NEXT: .LBB38_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB38_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: 
j .LBB38_1 +; XTENSA-NEXT: .LBB38_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB38_2 +; XTENSA-ATOMIC-NEXT: .LBB38_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB38_6 +; XTENSA-ATOMIC-NEXT: .LBB38_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB38_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB38_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB38_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB38_1 +; XTENSA-ATOMIC-NEXT: .LBB38_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI39_0 +; XTENSA-NEXT: j .LBB39_2 +; XTENSA-NEXT: .LBB39_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB39_4 +; XTENSA-NEXT: .LBB39_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB39_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB39_1 +; XTENSA-NEXT: .LBB39_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_seq_cst: 
+; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB39_2 +; XTENSA-ATOMIC-NEXT: .LBB39_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB39_6 +; XTENSA-ATOMIC-NEXT: .LBB39_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB39_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB39_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB39_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB39_1 +; XTENSA-ATOMIC-NEXT: .LBB39_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI40_0 +; XTENSA-NEXT: j .LBB40_2 +; XTENSA-NEXT: .LBB40_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB40_4 +; XTENSA-NEXT: .LBB40_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB40_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB40_1 +; XTENSA-NEXT: .LBB40_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; 
XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB40_2 +; XTENSA-ATOMIC-NEXT: .LBB40_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB40_6 +; XTENSA-ATOMIC-NEXT: .LBB40_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB40_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB40_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB40_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB40_1 +; XTENSA-ATOMIC-NEXT: .LBB40_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI41_0 +; XTENSA-NEXT: j .LBB41_2 +; XTENSA-NEXT: .LBB41_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB41_4 +; XTENSA-NEXT: .LBB41_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB41_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB41_1 +; XTENSA-NEXT: .LBB41_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; 
XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB41_2 +; XTENSA-ATOMIC-NEXT: .LBB41_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB41_6 +; XTENSA-ATOMIC-NEXT: .LBB41_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB41_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB41_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB41_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB41_1 +; XTENSA-ATOMIC-NEXT: .LBB41_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI42_0 +; XTENSA-NEXT: j .LBB42_2 +; XTENSA-NEXT: .LBB42_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB42_4 +; XTENSA-NEXT: .LBB42_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB42_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB42_1 +; XTENSA-NEXT: .LBB42_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: 
movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB42_2 +; XTENSA-ATOMIC-NEXT: .LBB42_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB42_6 +; XTENSA-ATOMIC-NEXT: .LBB42_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB42_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB42_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB42_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB42_1 +; XTENSA-ATOMIC-NEXT: .LBB42_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI43_0 +; XTENSA-NEXT: j .LBB43_2 +; XTENSA-NEXT: .LBB43_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB43_4 +; XTENSA-NEXT: .LBB43_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB43_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB43_1 +; XTENSA-NEXT: .LBB43_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; 
XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB43_2 +; XTENSA-ATOMIC-NEXT: .LBB43_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB43_6 +; XTENSA-ATOMIC-NEXT: .LBB43_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB43_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB43_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB43_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB43_1 +; XTENSA-ATOMIC-NEXT: .LBB43_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI44_0 +; XTENSA-NEXT: j .LBB44_2 +; XTENSA-NEXT: .LBB44_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB44_4 +; XTENSA-NEXT: .LBB44_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB44_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB44_1 +; XTENSA-NEXT: .LBB44_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB44_2 +; XTENSA-ATOMIC-NEXT: .LBB44_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB44_6 +; XTENSA-ATOMIC-NEXT: .LBB44_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB44_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB44_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB44_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB44_1 +; XTENSA-ATOMIC-NEXT: .LBB44_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a6, .LCPI45_0 +; XTENSA-NEXT: j .LBB45_2 +; XTENSA-NEXT: .LBB45_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB45_4 +; XTENSA-NEXT: .LBB45_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB45_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB45_1 +; XTENSA-NEXT: .LBB45_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; 
XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB45_2 +; XTENSA-ATOMIC-NEXT: .LBB45_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB45_6 +; XTENSA-ATOMIC-NEXT: .LBB45_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB45_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB45_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB45_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB45_1 +; XTENSA-ATOMIC-NEXT: .LBB45_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a6, .LCPI46_0 +; XTENSA-NEXT: j .LBB46_2 +; XTENSA-NEXT: .LBB46_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB46_4 +; XTENSA-NEXT: .LBB46_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB46_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB46_1 +; XTENSA-NEXT: .LBB46_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB46_2 +; XTENSA-ATOMIC-NEXT: .LBB46_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB46_6 +; XTENSA-ATOMIC-NEXT: .LBB46_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB46_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB46_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB46_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB46_1 +; XTENSA-ATOMIC-NEXT: .LBB46_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a3, .LCPI47_0 +; XTENSA-NEXT: j .LBB47_2 +; XTENSA-NEXT: .LBB47_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB47_4 +; XTENSA-NEXT: .LBB47_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bgeu a9, a8, .LBB47_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB47_1 +; XTENSA-NEXT: .LBB47_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB47_2 +; XTENSA-ATOMIC-NEXT: .LBB47_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, 
a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB47_6 +; XTENSA-ATOMIC-NEXT: .LBB47_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB47_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB47_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB47_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB47_1 +; XTENSA-ATOMIC-NEXT: .LBB47_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a3, .LCPI48_0 +; XTENSA-NEXT: j .LBB48_2 +; XTENSA-NEXT: .LBB48_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB48_4 +; XTENSA-NEXT: .LBB48_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bgeu a9, a8, .LBB48_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB48_1 +; XTENSA-NEXT: .LBB48_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB48_2 +; XTENSA-ATOMIC-NEXT: .LBB48_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB48_6 +; 
XTENSA-ATOMIC-NEXT: .LBB48_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB48_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB48_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB48_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB48_1 +; XTENSA-ATOMIC-NEXT: .LBB48_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a6, .LCPI49_0 +; XTENSA-NEXT: j .LBB49_2 +; XTENSA-NEXT: .LBB49_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB49_4 +; XTENSA-NEXT: .LBB49_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB49_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB49_1 +; XTENSA-NEXT: .LBB49_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB49_2 +; XTENSA-ATOMIC-NEXT: .LBB49_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB49_6 +; XTENSA-ATOMIC-NEXT: .LBB49_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl 
a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB49_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB49_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB49_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB49_1 +; XTENSA-ATOMIC-NEXT: .LBB49_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a6, .LCPI50_0 +; XTENSA-NEXT: j .LBB50_2 +; XTENSA-NEXT: .LBB50_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB50_4 +; XTENSA-NEXT: .LBB50_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB50_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB50_1 +; XTENSA-NEXT: .LBB50_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB50_2 +; XTENSA-ATOMIC-NEXT: .LBB50_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB50_6 +; XTENSA-ATOMIC-NEXT: .LBB50_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB50_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB50_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB50_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB50_1 +; XTENSA-ATOMIC-NEXT: .LBB50_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a6, .LCPI51_0 +; XTENSA-NEXT: j .LBB51_2 +; XTENSA-NEXT: .LBB51_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB51_4 +; XTENSA-NEXT: .LBB51_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB51_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB51_1 +; XTENSA-NEXT: .LBB51_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB51_2 +; XTENSA-ATOMIC-NEXT: .LBB51_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB51_6 +; XTENSA-ATOMIC-NEXT: .LBB51_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB51_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB51_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB51_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB51_1 +; XTENSA-ATOMIC-NEXT: .LBB51_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a3, .LCPI52_0 +; XTENSA-NEXT: j .LBB52_2 +; XTENSA-NEXT: .LBB52_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB52_4 +; XTENSA-NEXT: .LBB52_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bltu a9, a8, .LBB52_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB52_1 +; XTENSA-NEXT: .LBB52_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB52_2 +; XTENSA-ATOMIC-NEXT: .LBB52_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB52_6 +; XTENSA-ATOMIC-NEXT: .LBB52_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB52_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB52_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, 
a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB52_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB52_1 +; XTENSA-ATOMIC-NEXT: .LBB52_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a3, .LCPI53_0 +; XTENSA-NEXT: j .LBB53_2 +; XTENSA-NEXT: .LBB53_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB53_4 +; XTENSA-NEXT: .LBB53_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bltu a9, a8, .LBB53_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB53_1 +; XTENSA-NEXT: .LBB53_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB53_2 +; XTENSA-ATOMIC-NEXT: .LBB53_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB53_6 +; XTENSA-ATOMIC-NEXT: .LBB53_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB53_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB53_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; 
XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB53_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB53_1 +; XTENSA-ATOMIC-NEXT: .LBB53_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a6, .LCPI54_0 +; XTENSA-NEXT: j .LBB54_2 +; XTENSA-NEXT: .LBB54_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB54_4 +; XTENSA-NEXT: .LBB54_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB54_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB54_1 +; XTENSA-NEXT: .LBB54_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB54_2 +; XTENSA-ATOMIC-NEXT: .LBB54_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB54_6 +; XTENSA-ATOMIC-NEXT: .LBB54_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB54_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB54_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; 
XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB54_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB54_1 +; XTENSA-ATOMIC-NEXT: .LBB54_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI55_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI55_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB55_2 +; XTENSA-ATOMIC-NEXT: .LBB55_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB55_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB55_4 +; XTENSA-ATOMIC-NEXT: .LBB55_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB55_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB55_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB55_1 +; XTENSA-ATOMIC-NEXT: .LBB55_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI56_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI56_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB56_2 +; XTENSA-ATOMIC-NEXT: .LBB56_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB56_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB56_4 +; XTENSA-ATOMIC-NEXT: .LBB56_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB56_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB56_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB56_1 +; XTENSA-ATOMIC-NEXT: .LBB56_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI57_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI57_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB57_2 +; XTENSA-ATOMIC-NEXT: .LBB57_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB57_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB57_4 +; XTENSA-ATOMIC-NEXT: .LBB57_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB57_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB57_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB57_1 +; XTENSA-ATOMIC-NEXT: .LBB57_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI58_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw 
+; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI58_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB58_2 +; XTENSA-ATOMIC-NEXT: .LBB58_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB58_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB58_4 +; XTENSA-ATOMIC-NEXT: .LBB58_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB58_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB58_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB58_1 +; XTENSA-ATOMIC-NEXT: .LBB58_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI59_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI59_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB59_2 +; XTENSA-ATOMIC-NEXT: .LBB59_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB59_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB59_4 +; XTENSA-ATOMIC-NEXT: .LBB59_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB59_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB59_2 Depth=1 +; XTENSA-ATOMIC-NEXT: 
or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB59_1 +; XTENSA-ATOMIC-NEXT: .LBB59_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI60_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI60_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB60_2 +; XTENSA-ATOMIC-NEXT: .LBB60_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB60_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB60_4 +; XTENSA-ATOMIC-NEXT: .LBB60_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB60_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB60_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB60_1 +; XTENSA-ATOMIC-NEXT: .LBB60_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI61_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI61_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB61_2 +; XTENSA-ATOMIC-NEXT: .LBB61_1: # 
%atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB61_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB61_4 +; XTENSA-ATOMIC-NEXT: .LBB61_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB61_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB61_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB61_1 +; XTENSA-ATOMIC-NEXT: .LBB61_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI62_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI62_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB62_2 +; XTENSA-ATOMIC-NEXT: .LBB62_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB62_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB62_4 +; XTENSA-ATOMIC-NEXT: .LBB62_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB62_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB62_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB62_1 +; XTENSA-ATOMIC-NEXT: .LBB62_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI63_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_add_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI63_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB63_2 +; XTENSA-ATOMIC-NEXT: .LBB63_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB63_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB63_4 +; XTENSA-ATOMIC-NEXT: .LBB63_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB63_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB63_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB63_1 +; XTENSA-ATOMIC-NEXT: .LBB63_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI64_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI64_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB64_2 +; XTENSA-ATOMIC-NEXT: .LBB64_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB64_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB64_4 +; XTENSA-ATOMIC-NEXT: .LBB64_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB64_1 +; XTENSA-ATOMIC-NEXT: 
# %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB64_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB64_1 +; XTENSA-ATOMIC-NEXT: .LBB64_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI65_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI65_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB65_2 +; XTENSA-ATOMIC-NEXT: .LBB65_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB65_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB65_4 +; XTENSA-ATOMIC-NEXT: .LBB65_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB65_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB65_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB65_1 +; XTENSA-ATOMIC-NEXT: .LBB65_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI66_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI66_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; 
XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB66_2 +; XTENSA-ATOMIC-NEXT: .LBB66_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB66_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB66_4 +; XTENSA-ATOMIC-NEXT: .LBB66_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB66_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB66_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB66_1 +; XTENSA-ATOMIC-NEXT: .LBB66_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI67_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI67_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB67_2 +; XTENSA-ATOMIC-NEXT: .LBB67_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB67_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB67_4 +; XTENSA-ATOMIC-NEXT: .LBB67_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB67_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB67_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB67_1 +; XTENSA-ATOMIC-NEXT: .LBB67_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI68_0 +; XTENSA-NEXT: callx8 
a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI68_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB68_2 +; XTENSA-ATOMIC-NEXT: .LBB68_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB68_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB68_4 +; XTENSA-ATOMIC-NEXT: .LBB68_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB68_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB68_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB68_1 +; XTENSA-ATOMIC-NEXT: .LBB68_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI69_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI69_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB69_2 +; XTENSA-ATOMIC-NEXT: .LBB69_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB69_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB69_4 +; XTENSA-ATOMIC-NEXT: .LBB69_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: 
or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB69_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB69_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB69_1 +; XTENSA-ATOMIC-NEXT: .LBB69_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI70_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI70_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB70_2 +; XTENSA-ATOMIC-NEXT: .LBB70_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB70_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB70_4 +; XTENSA-ATOMIC-NEXT: .LBB70_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB70_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB70_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB70_1 +; XTENSA-ATOMIC-NEXT: .LBB70_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI71_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI71_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, 
a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB71_2 +; XTENSA-ATOMIC-NEXT: .LBB71_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB71_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB71_4 +; XTENSA-ATOMIC-NEXT: .LBB71_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB71_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB71_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB71_1 +; XTENSA-ATOMIC-NEXT: .LBB71_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI72_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI72_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB72_2 +; XTENSA-ATOMIC-NEXT: .LBB72_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB72_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB72_4 +; XTENSA-ATOMIC-NEXT: .LBB72_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB72_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB72_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB72_1 +; XTENSA-ATOMIC-NEXT: .LBB72_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI73_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acq_rel: +; XTENSA-ATOMIC: # 
%bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI73_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB73_2 +; XTENSA-ATOMIC-NEXT: .LBB73_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB73_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB73_4 +; XTENSA-ATOMIC-NEXT: .LBB73_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB73_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB73_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB73_1 +; XTENSA-ATOMIC-NEXT: .LBB73_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI74_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI74_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB74_2 +; XTENSA-ATOMIC-NEXT: .LBB74_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB74_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB74_4 +; XTENSA-ATOMIC-NEXT: .LBB74_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB74_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB74_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB74_1 +; XTENSA-ATOMIC-NEXT: .LBB74_4: # 
%atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI75_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI75_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB75_2 +; XTENSA-ATOMIC-NEXT: .LBB75_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB75_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB75_4 +; XTENSA-ATOMIC-NEXT: .LBB75_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB75_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB75_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB75_1 +; XTENSA-ATOMIC-NEXT: .LBB75_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI76_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI76_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB76_2 +; XTENSA-ATOMIC-NEXT: .LBB76_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB76_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB76_4 +; XTENSA-ATOMIC-NEXT: .LBB76_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB76_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB76_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB76_1 +; XTENSA-ATOMIC-NEXT: .LBB76_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI77_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI77_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB77_2 +; XTENSA-ATOMIC-NEXT: .LBB77_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB77_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB77_4 +; XTENSA-ATOMIC-NEXT: .LBB77_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB77_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB77_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB77_1 +; XTENSA-ATOMIC-NEXT: .LBB77_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI78_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; 
XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI78_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB78_2 +; XTENSA-ATOMIC-NEXT: .LBB78_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB78_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB78_4 +; XTENSA-ATOMIC-NEXT: .LBB78_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB78_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB78_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB78_1 +; XTENSA-ATOMIC-NEXT: .LBB78_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI79_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI79_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB79_2 +; XTENSA-ATOMIC-NEXT: .LBB79_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB79_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB79_4 +; XTENSA-ATOMIC-NEXT: .LBB79_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; 
XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB79_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB79_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB79_1 +; XTENSA-ATOMIC-NEXT: .LBB79_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI80_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI80_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB80_2 +; XTENSA-ATOMIC-NEXT: .LBB80_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB80_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB80_4 +; XTENSA-ATOMIC-NEXT: .LBB80_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB80_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB80_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB80_1 +; XTENSA-ATOMIC-NEXT: .LBB80_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI81_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI81_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB81_2 +; XTENSA-ATOMIC-NEXT: .LBB81_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB81_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB81_4 +; XTENSA-ATOMIC-NEXT: .LBB81_2: # 
%atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB81_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB81_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB81_1 +; XTENSA-ATOMIC-NEXT: .LBB81_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI82_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI82_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB82_2 +; XTENSA-ATOMIC-NEXT: .LBB82_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB82_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB82_4 +; XTENSA-ATOMIC-NEXT: .LBB82_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB82_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB82_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB82_1 +; XTENSA-ATOMIC-NEXT: .LBB82_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI83_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI83_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; 
XTENSA-ATOMIC-NEXT: j .LBB83_2 +; XTENSA-ATOMIC-NEXT: .LBB83_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB83_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB83_4 +; XTENSA-ATOMIC-NEXT: .LBB83_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB83_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB83_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB83_1 +; XTENSA-ATOMIC-NEXT: .LBB83_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI84_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI84_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB84_2 +; XTENSA-ATOMIC-NEXT: .LBB84_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB84_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB84_4 +; XTENSA-ATOMIC-NEXT: .LBB84_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB84_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB84_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB84_1 +; XTENSA-ATOMIC-NEXT: .LBB84_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI85_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI85_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; 
XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB85_2 +; XTENSA-ATOMIC-NEXT: .LBB85_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB85_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB85_4 +; XTENSA-ATOMIC-NEXT: .LBB85_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB85_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB85_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB85_1 +; XTENSA-ATOMIC-NEXT: .LBB85_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI86_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI86_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB86_2 +; XTENSA-ATOMIC-NEXT: .LBB86_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB86_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB86_4 +; XTENSA-ATOMIC-NEXT: .LBB86_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB86_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB86_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB86_1 +; XTENSA-ATOMIC-NEXT: .LBB86_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI87_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 
32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI87_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB87_2 +; XTENSA-ATOMIC-NEXT: .LBB87_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB87_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB87_4 +; XTENSA-ATOMIC-NEXT: .LBB87_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB87_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB87_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB87_1 +; XTENSA-ATOMIC-NEXT: .LBB87_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI88_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI88_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB88_2 +; XTENSA-ATOMIC-NEXT: .LBB88_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB88_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB88_4 +; XTENSA-ATOMIC-NEXT: .LBB88_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB88_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB88_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB88_1 +; XTENSA-ATOMIC-NEXT: .LBB88_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; 
XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI89_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI89_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB89_2 +; XTENSA-ATOMIC-NEXT: .LBB89_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB89_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB89_4 +; XTENSA-ATOMIC-NEXT: .LBB89_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB89_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB89_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB89_1 +; XTENSA-ATOMIC-NEXT: .LBB89_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI90_0 +; XTENSA-NEXT: j .LBB90_2 +; XTENSA-NEXT: .LBB90_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB90_4 +; XTENSA-NEXT: .LBB90_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB90_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB90_1 +; XTENSA-NEXT: .LBB90_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI90_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; 
XTENSA-ATOMIC-NEXT: j .LBB90_2 +; XTENSA-ATOMIC-NEXT: .LBB90_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB90_6 +; XTENSA-ATOMIC-NEXT: .LBB90_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB90_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB90_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI90_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB90_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB90_1 +; XTENSA-ATOMIC-NEXT: .LBB90_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI91_0 +; XTENSA-NEXT: j .LBB91_2 +; XTENSA-NEXT: .LBB91_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB91_4 +; XTENSA-NEXT: .LBB91_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB91_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB91_1 +; XTENSA-NEXT: .LBB91_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI91_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB91_2 +; XTENSA-ATOMIC-NEXT: .LBB91_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in 
Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB91_6 +; XTENSA-ATOMIC-NEXT: .LBB91_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB91_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB91_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI91_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB91_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB91_1 +; XTENSA-ATOMIC-NEXT: .LBB91_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI92_0 +; XTENSA-NEXT: j .LBB92_2 +; XTENSA-NEXT: .LBB92_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB92_4 +; XTENSA-NEXT: .LBB92_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB92_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB92_1 +; XTENSA-NEXT: .LBB92_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI92_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB92_2 +; 
XTENSA-ATOMIC-NEXT: .LBB92_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB92_6 +; XTENSA-ATOMIC-NEXT: .LBB92_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB92_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB92_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI92_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB92_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB92_1 +; XTENSA-ATOMIC-NEXT: .LBB92_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI93_0 +; XTENSA-NEXT: j .LBB93_2 +; XTENSA-NEXT: .LBB93_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB93_4 +; XTENSA-NEXT: .LBB93_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB93_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB93_1 +; XTENSA-NEXT: .LBB93_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI93_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: 
movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB93_2 +; XTENSA-ATOMIC-NEXT: .LBB93_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB93_6 +; XTENSA-ATOMIC-NEXT: .LBB93_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB93_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB93_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI93_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB93_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB93_1 +; XTENSA-ATOMIC-NEXT: .LBB93_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI94_0 +; XTENSA-NEXT: j .LBB94_2 +; XTENSA-NEXT: .LBB94_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB94_4 +; XTENSA-NEXT: .LBB94_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB94_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB94_1 +; XTENSA-NEXT: .LBB94_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI94_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB94_2 +; 
XTENSA-ATOMIC-NEXT: .LBB94_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB94_6 +; XTENSA-ATOMIC-NEXT: .LBB94_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB94_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB94_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI94_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB94_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB94_1 +; XTENSA-ATOMIC-NEXT: .LBB94_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI95_0 +; XTENSA-NEXT: j .LBB95_2 +; XTENSA-NEXT: .LBB95_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB95_4 +; XTENSA-NEXT: .LBB95_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB95_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB95_1 +; XTENSA-NEXT: .LBB95_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI95_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB95_2 +; XTENSA-ATOMIC-NEXT: .LBB95_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in 
Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB95_6 +; XTENSA-ATOMIC-NEXT: .LBB95_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB95_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB95_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI95_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB95_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB95_1 +; XTENSA-ATOMIC-NEXT: .LBB95_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI96_0 +; XTENSA-NEXT: j .LBB96_2 +; XTENSA-NEXT: .LBB96_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB96_4 +; XTENSA-NEXT: .LBB96_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB96_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB96_1 +; XTENSA-NEXT: .LBB96_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI96_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB96_2 +; XTENSA-ATOMIC-NEXT: .LBB96_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB96_6 
+; XTENSA-ATOMIC-NEXT: .LBB96_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB96_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB96_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI96_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB96_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB96_1 +; XTENSA-ATOMIC-NEXT: .LBB96_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI97_0 +; XTENSA-NEXT: j .LBB97_2 +; XTENSA-NEXT: .LBB97_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB97_4 +; XTENSA-NEXT: .LBB97_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB97_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB97_1 +; XTENSA-NEXT: .LBB97_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI97_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB97_2 +; XTENSA-ATOMIC-NEXT: .LBB97_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB97_6 +; XTENSA-ATOMIC-NEXT: .LBB97_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB97_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB97_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI97_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB97_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB97_1 +; XTENSA-ATOMIC-NEXT: .LBB97_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI98_0 +; XTENSA-NEXT: j .LBB98_2 +; XTENSA-NEXT: .LBB98_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB98_4 +; XTENSA-NEXT: .LBB98_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB98_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB98_1 +; XTENSA-NEXT: .LBB98_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI98_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB98_2 +; XTENSA-ATOMIC-NEXT: .LBB98_1: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB98_6 +; XTENSA-ATOMIC-NEXT: .LBB98_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB98_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB98_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI98_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB98_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB98_1 +; XTENSA-ATOMIC-NEXT: .LBB98_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI99_0 +; XTENSA-NEXT: j .LBB99_2 +; XTENSA-NEXT: .LBB99_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB99_4 +; XTENSA-NEXT: .LBB99_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB99_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB99_1 +; XTENSA-NEXT: .LBB99_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI99_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB99_2 +; XTENSA-ATOMIC-NEXT: .LBB99_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB99_6 +; XTENSA-ATOMIC-NEXT: .LBB99_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB99_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB99_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI99_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB99_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB99_1 +; XTENSA-ATOMIC-NEXT: .LBB99_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI100_1 +; XTENSA-NEXT: j .LBB100_2 +; XTENSA-NEXT: .LBB100_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB100_4 +; XTENSA-NEXT: .LBB100_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI100_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB100_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB100_1 +; XTENSA-NEXT: .LBB100_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI100_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB100_2 +; XTENSA-ATOMIC-NEXT: .LBB100_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB100_6 +; XTENSA-ATOMIC-NEXT: .LBB100_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner 
Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI100_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB100_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB100_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB100_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB100_1 +; XTENSA-ATOMIC-NEXT: .LBB100_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI101_1 +; XTENSA-NEXT: j .LBB101_2 +; XTENSA-NEXT: .LBB101_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB101_4 +; XTENSA-NEXT: .LBB101_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI101_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB101_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB101_1 +; XTENSA-NEXT: .LBB101_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI101_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB101_2 +; XTENSA-ATOMIC-NEXT: .LBB101_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB101_6 +; XTENSA-ATOMIC-NEXT: .LBB101_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI101_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; 
XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB101_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB101_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB101_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB101_1 +; XTENSA-ATOMIC-NEXT: .LBB101_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI102_1 +; XTENSA-NEXT: j .LBB102_2 +; XTENSA-NEXT: .LBB102_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB102_4 +; XTENSA-NEXT: .LBB102_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI102_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB102_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB102_1 +; XTENSA-NEXT: .LBB102_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI102_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB102_2 +; XTENSA-ATOMIC-NEXT: .LBB102_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB102_6 +; XTENSA-ATOMIC-NEXT: .LBB102_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI102_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, 
a5, .LBB102_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB102_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB102_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB102_1 +; XTENSA-ATOMIC-NEXT: .LBB102_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI103_1 +; XTENSA-NEXT: j .LBB103_2 +; XTENSA-NEXT: .LBB103_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB103_4 +; XTENSA-NEXT: .LBB103_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI103_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB103_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB103_1 +; XTENSA-NEXT: .LBB103_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI103_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB103_2 +; XTENSA-ATOMIC-NEXT: .LBB103_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB103_6 +; XTENSA-ATOMIC-NEXT: .LBB103_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI103_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB103_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB103_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB103_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB103_1 +; XTENSA-ATOMIC-NEXT: .LBB103_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI104_1 +; XTENSA-NEXT: j .LBB104_2 +; XTENSA-NEXT: .LBB104_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB104_4 +; XTENSA-NEXT: .LBB104_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI104_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB104_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB104_1 +; XTENSA-NEXT: .LBB104_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI104_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB104_2 +; XTENSA-ATOMIC-NEXT: .LBB104_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB104_6 +; XTENSA-ATOMIC-NEXT: .LBB104_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI104_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB104_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB104_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB104_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB104_1 +; XTENSA-ATOMIC-NEXT: .LBB104_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI105_1 +; XTENSA-NEXT: j .LBB105_2 +; XTENSA-NEXT: .LBB105_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB105_4 +; XTENSA-NEXT: .LBB105_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI105_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB105_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB105_1 +; XTENSA-NEXT: .LBB105_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI105_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB105_2 +; XTENSA-ATOMIC-NEXT: .LBB105_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB105_6 +; XTENSA-ATOMIC-NEXT: .LBB105_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI105_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB105_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB105_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and 
a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB105_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB105_1 +; XTENSA-ATOMIC-NEXT: .LBB105_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI106_1 +; XTENSA-NEXT: j .LBB106_2 +; XTENSA-NEXT: .LBB106_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB106_4 +; XTENSA-NEXT: .LBB106_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI106_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB106_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB106_1 +; XTENSA-NEXT: .LBB106_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI106_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB106_2 +; XTENSA-ATOMIC-NEXT: .LBB106_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB106_6 +; XTENSA-ATOMIC-NEXT: .LBB106_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI106_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB106_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB106_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; 
XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB106_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB106_1 +; XTENSA-ATOMIC-NEXT: .LBB106_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI107_1 +; XTENSA-NEXT: j .LBB107_2 +; XTENSA-NEXT: .LBB107_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB107_4 +; XTENSA-NEXT: .LBB107_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI107_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB107_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB107_1 +; XTENSA-NEXT: .LBB107_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI107_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB107_2 +; XTENSA-ATOMIC-NEXT: .LBB107_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB107_6 +; XTENSA-ATOMIC-NEXT: .LBB107_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI107_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB107_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB107_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB107_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB107_1 +; XTENSA-ATOMIC-NEXT: .LBB107_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI108_1 +; XTENSA-NEXT: j .LBB108_2 +; XTENSA-NEXT: .LBB108_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB108_4 +; XTENSA-NEXT: .LBB108_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI108_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB108_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB108_1 +; XTENSA-NEXT: .LBB108_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI108_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB108_2 +; XTENSA-ATOMIC-NEXT: .LBB108_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB108_6 +; XTENSA-ATOMIC-NEXT: .LBB108_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI108_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB108_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB108_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB108_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB108_1 +; 
XTENSA-ATOMIC-NEXT: .LBB108_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI109_1 +; XTENSA-NEXT: j .LBB109_2 +; XTENSA-NEXT: .LBB109_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB109_4 +; XTENSA-NEXT: .LBB109_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI109_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB109_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB109_1 +; XTENSA-NEXT: .LBB109_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI109_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB109_2 +; XTENSA-ATOMIC-NEXT: .LBB109_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB109_6 +; XTENSA-ATOMIC-NEXT: .LBB109_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI109_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB109_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB109_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB109_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB109_1 +; XTENSA-ATOMIC-NEXT: .LBB109_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI110_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB110_2 +; XTENSA-ATOMIC-NEXT: .LBB110_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB110_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB110_4 +; XTENSA-ATOMIC-NEXT: .LBB110_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB110_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB110_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB110_1 +; XTENSA-ATOMIC-NEXT: .LBB110_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI111_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB111_2 +; XTENSA-ATOMIC-NEXT: .LBB111_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB111_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB111_4 +; XTENSA-ATOMIC-NEXT: .LBB111_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB111_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB111_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB111_1 +; XTENSA-ATOMIC-NEXT: .LBB111_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI112_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_xchg_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB112_2 +; XTENSA-ATOMIC-NEXT: .LBB112_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB112_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB112_4 +; XTENSA-ATOMIC-NEXT: .LBB112_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB112_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB112_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB112_1 +; XTENSA-ATOMIC-NEXT: .LBB112_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI113_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB113_2 +; XTENSA-ATOMIC-NEXT: .LBB113_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB113_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB113_4 +; XTENSA-ATOMIC-NEXT: .LBB113_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB113_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB113_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB113_1 +; XTENSA-ATOMIC-NEXT: .LBB113_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI114_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB114_2 +; XTENSA-ATOMIC-NEXT: .LBB114_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB114_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB114_4 +; 
XTENSA-ATOMIC-NEXT: .LBB114_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB114_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB114_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB114_1 +; XTENSA-ATOMIC-NEXT: .LBB114_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI115_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB115_2 +; XTENSA-ATOMIC-NEXT: .LBB115_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB115_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB115_4 +; XTENSA-ATOMIC-NEXT: .LBB115_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB115_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB115_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB115_1 +; XTENSA-ATOMIC-NEXT: .LBB115_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI116_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB116_2 +; XTENSA-ATOMIC-NEXT: .LBB116_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB116_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB116_4 +; XTENSA-ATOMIC-NEXT: .LBB116_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB116_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB116_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB116_1 +; XTENSA-ATOMIC-NEXT: 
.LBB116_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI117_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB117_2 +; XTENSA-ATOMIC-NEXT: .LBB117_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB117_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB117_4 +; XTENSA-ATOMIC-NEXT: .LBB117_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB117_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB117_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB117_1 +; XTENSA-ATOMIC-NEXT: .LBB117_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI118_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB118_2 +; XTENSA-ATOMIC-NEXT: .LBB118_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB118_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB118_4 +; XTENSA-ATOMIC-NEXT: .LBB118_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB118_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB118_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB118_1 +; XTENSA-ATOMIC-NEXT: .LBB118_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, 
.LCPI119_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB119_2 +; XTENSA-ATOMIC-NEXT: .LBB119_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB119_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB119_4 +; XTENSA-ATOMIC-NEXT: .LBB119_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB119_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB119_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB119_1 +; XTENSA-ATOMIC-NEXT: .LBB119_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI120_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB120_2 +; XTENSA-ATOMIC-NEXT: .LBB120_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB120_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB120_4 +; XTENSA-ATOMIC-NEXT: .LBB120_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB120_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB120_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB120_1 +; XTENSA-ATOMIC-NEXT: .LBB120_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI121_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB121_2 +; XTENSA-ATOMIC-NEXT: .LBB121_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB121_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, 
a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB121_4 +; XTENSA-ATOMIC-NEXT: .LBB121_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB121_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB121_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB121_1 +; XTENSA-ATOMIC-NEXT: .LBB121_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI122_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB122_2 +; XTENSA-ATOMIC-NEXT: .LBB122_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB122_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB122_4 +; XTENSA-ATOMIC-NEXT: .LBB122_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB122_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB122_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB122_1 +; XTENSA-ATOMIC-NEXT: .LBB122_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI123_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB123_2 +; XTENSA-ATOMIC-NEXT: .LBB123_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB123_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB123_4 +; XTENSA-ATOMIC-NEXT: .LBB123_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB123_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB123_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB123_1 +; XTENSA-ATOMIC-NEXT: .LBB123_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI124_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB124_2 +; XTENSA-ATOMIC-NEXT: .LBB124_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB124_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB124_4 +; XTENSA-ATOMIC-NEXT: .LBB124_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB124_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB124_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB124_1 +; XTENSA-ATOMIC-NEXT: .LBB124_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI125_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB125_2 +; XTENSA-ATOMIC-NEXT: .LBB125_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB125_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB125_4 +; XTENSA-ATOMIC-NEXT: .LBB125_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB125_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB125_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB125_1 +; XTENSA-ATOMIC-NEXT: .LBB125_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, 
a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI126_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB126_2 +; XTENSA-ATOMIC-NEXT: .LBB126_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB126_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB126_4 +; XTENSA-ATOMIC-NEXT: .LBB126_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB126_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB126_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB126_1 +; XTENSA-ATOMIC-NEXT: .LBB126_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI127_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB127_2 +; XTENSA-ATOMIC-NEXT: .LBB127_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB127_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB127_4 +; XTENSA-ATOMIC-NEXT: .LBB127_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB127_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB127_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB127_1 +; XTENSA-ATOMIC-NEXT: .LBB127_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI128_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB128_2 +; XTENSA-ATOMIC-NEXT: 
.LBB128_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB128_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB128_4 +; XTENSA-ATOMIC-NEXT: .LBB128_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB128_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB128_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB128_1 +; XTENSA-ATOMIC-NEXT: .LBB128_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI129_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB129_2 +; XTENSA-ATOMIC-NEXT: .LBB129_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB129_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB129_4 +; XTENSA-ATOMIC-NEXT: .LBB129_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB129_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB129_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB129_1 +; XTENSA-ATOMIC-NEXT: .LBB129_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b seq_cst + ret i32 %res +} + +;define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b monotonic +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b acquire +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b release +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b acq_rel +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b seq_cst +; ret i32 %res +;} + +define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI130_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_or_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB130_2 +; XTENSA-ATOMIC-NEXT: .LBB130_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB130_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB130_4 +; XTENSA-ATOMIC-NEXT: .LBB130_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB130_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB130_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB130_1 +; XTENSA-ATOMIC-NEXT: .LBB130_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI131_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB131_2 +; XTENSA-ATOMIC-NEXT: .LBB131_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB131_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB131_4 +; XTENSA-ATOMIC-NEXT: .LBB131_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB131_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB131_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB131_1 +; XTENSA-ATOMIC-NEXT: .LBB131_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI132_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB132_2 +; XTENSA-ATOMIC-NEXT: .LBB132_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB132_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB132_4 +; XTENSA-ATOMIC-NEXT: .LBB132_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # 
=>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB132_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB132_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB132_1 +; XTENSA-ATOMIC-NEXT: .LBB132_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI133_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB133_2 +; XTENSA-ATOMIC-NEXT: .LBB133_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB133_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB133_4 +; XTENSA-ATOMIC-NEXT: .LBB133_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB133_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB133_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB133_1 +; XTENSA-ATOMIC-NEXT: .LBB133_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI134_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB134_2 +; XTENSA-ATOMIC-NEXT: .LBB134_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB134_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB134_4 +; XTENSA-ATOMIC-NEXT: .LBB134_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB134_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB134_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB134_1 +; XTENSA-ATOMIC-NEXT: .LBB134_4: # %atomicrmw.end +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI135_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB135_2 +; XTENSA-ATOMIC-NEXT: .LBB135_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB135_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB135_4 +; XTENSA-ATOMIC-NEXT: .LBB135_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB135_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB135_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB135_1 +; XTENSA-ATOMIC-NEXT: .LBB135_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI136_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB136_2 +; XTENSA-ATOMIC-NEXT: .LBB136_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB136_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB136_4 +; XTENSA-ATOMIC-NEXT: .LBB136_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB136_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB136_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB136_1 +; XTENSA-ATOMIC-NEXT: .LBB136_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI137_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw 
+; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB137_2 +; XTENSA-ATOMIC-NEXT: .LBB137_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB137_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB137_4 +; XTENSA-ATOMIC-NEXT: .LBB137_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB137_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB137_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB137_1 +; XTENSA-ATOMIC-NEXT: .LBB137_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI138_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB138_2 +; XTENSA-ATOMIC-NEXT: .LBB138_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB138_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB138_4 +; XTENSA-ATOMIC-NEXT: .LBB138_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB138_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB138_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB138_1 +; XTENSA-ATOMIC-NEXT: .LBB138_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI139_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB139_2 +; XTENSA-ATOMIC-NEXT: .LBB139_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB139_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 
1, .LBB139_4 +; XTENSA-ATOMIC-NEXT: .LBB139_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB139_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB139_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB139_1 +; XTENSA-ATOMIC-NEXT: .LBB139_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI140_0 +; XTENSA-NEXT: j .LBB140_2 +; XTENSA-NEXT: .LBB140_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB140_4 +; XTENSA-NEXT: .LBB140_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB140_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB140_1 +; XTENSA-NEXT: .LBB140_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB140_2 +; XTENSA-ATOMIC-NEXT: .LBB140_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB140_6 +; XTENSA-ATOMIC-NEXT: .LBB140_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB140_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB140_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB140_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB140_1 +; XTENSA-ATOMIC-NEXT: .LBB140_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI141_0 +; XTENSA-NEXT: j .LBB141_2 +; XTENSA-NEXT: .LBB141_1: # 
%atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB141_4 +; XTENSA-NEXT: .LBB141_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB141_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB141_1 +; XTENSA-NEXT: .LBB141_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB141_2 +; XTENSA-ATOMIC-NEXT: .LBB141_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB141_6 +; XTENSA-ATOMIC-NEXT: .LBB141_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB141_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB141_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB141_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB141_1 +; XTENSA-ATOMIC-NEXT: .LBB141_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI142_0 +; XTENSA-NEXT: j .LBB142_2 +; XTENSA-NEXT: .LBB142_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB142_4 +; XTENSA-NEXT: .LBB142_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB142_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB142_1 +; XTENSA-NEXT: .LBB142_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB142_2 +; XTENSA-ATOMIC-NEXT: 
.LBB142_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB142_6 +; XTENSA-ATOMIC-NEXT: .LBB142_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB142_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB142_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB142_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB142_1 +; XTENSA-ATOMIC-NEXT: .LBB142_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI143_0 +; XTENSA-NEXT: j .LBB143_2 +; XTENSA-NEXT: .LBB143_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB143_4 +; XTENSA-NEXT: .LBB143_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB143_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB143_1 +; XTENSA-NEXT: .LBB143_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB143_2 +; XTENSA-ATOMIC-NEXT: .LBB143_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB143_6 +; XTENSA-ATOMIC-NEXT: .LBB143_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB143_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB143_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB143_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB143_1 +; XTENSA-ATOMIC-NEXT: .LBB143_6: # 
%atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI144_0 +; XTENSA-NEXT: j .LBB144_2 +; XTENSA-NEXT: .LBB144_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB144_4 +; XTENSA-NEXT: .LBB144_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB144_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB144_1 +; XTENSA-NEXT: .LBB144_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB144_2 +; XTENSA-ATOMIC-NEXT: .LBB144_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB144_6 +; XTENSA-ATOMIC-NEXT: .LBB144_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB144_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB144_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB144_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB144_1 +; XTENSA-ATOMIC-NEXT: .LBB144_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI145_0 +; XTENSA-NEXT: j .LBB145_2 +; XTENSA-NEXT: .LBB145_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB145_4 +; XTENSA-NEXT: .LBB145_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB145_1 +; XTENSA-NEXT: # 
%bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB145_1 +; XTENSA-NEXT: .LBB145_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB145_2 +; XTENSA-ATOMIC-NEXT: .LBB145_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB145_6 +; XTENSA-ATOMIC-NEXT: .LBB145_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB145_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB145_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB145_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB145_1 +; XTENSA-ATOMIC-NEXT: .LBB145_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI146_0 +; XTENSA-NEXT: j .LBB146_2 +; XTENSA-NEXT: .LBB146_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB146_4 +; XTENSA-NEXT: .LBB146_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB146_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB146_1 +; XTENSA-NEXT: .LBB146_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB146_2 +; XTENSA-ATOMIC-NEXT: .LBB146_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB146_6 +; XTENSA-ATOMIC-NEXT: .LBB146_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB146_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB146_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: 
# in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB146_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB146_1 +; XTENSA-ATOMIC-NEXT: .LBB146_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI147_0 +; XTENSA-NEXT: j .LBB147_2 +; XTENSA-NEXT: .LBB147_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB147_4 +; XTENSA-NEXT: .LBB147_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB147_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB147_1 +; XTENSA-NEXT: .LBB147_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB147_2 +; XTENSA-ATOMIC-NEXT: .LBB147_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB147_6 +; XTENSA-ATOMIC-NEXT: .LBB147_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB147_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB147_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB147_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB147_1 +; XTENSA-ATOMIC-NEXT: .LBB147_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI148_0 +; XTENSA-NEXT: j .LBB148_2 +; XTENSA-NEXT: .LBB148_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB148_2 
Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB148_4 +; XTENSA-NEXT: .LBB148_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB148_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB148_1 +; XTENSA-NEXT: .LBB148_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB148_2 +; XTENSA-ATOMIC-NEXT: .LBB148_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB148_6 +; XTENSA-ATOMIC-NEXT: .LBB148_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB148_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB148_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB148_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB148_1 +; XTENSA-ATOMIC-NEXT: .LBB148_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI149_0 +; XTENSA-NEXT: j .LBB149_2 +; XTENSA-NEXT: .LBB149_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB149_4 +; XTENSA-NEXT: .LBB149_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB149_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB149_1 +; XTENSA-NEXT: .LBB149_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB149_2 +; XTENSA-ATOMIC-NEXT: .LBB149_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB149_6 +; XTENSA-ATOMIC-NEXT: .LBB149_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB149_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB149_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB149_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB149_1 +; XTENSA-ATOMIC-NEXT: .LBB149_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI150_0 +; XTENSA-NEXT: j .LBB150_2 +; XTENSA-NEXT: .LBB150_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB150_4 +; XTENSA-NEXT: .LBB150_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB150_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB150_1 +; XTENSA-NEXT: .LBB150_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB150_2 +; XTENSA-ATOMIC-NEXT: .LBB150_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB150_6 +; XTENSA-ATOMIC-NEXT: .LBB150_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB150_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB150_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB150_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB150_1 +; XTENSA-ATOMIC-NEXT: .LBB150_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = 
atomicrmw umax ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI151_0 +; XTENSA-NEXT: j .LBB151_2 +; XTENSA-NEXT: .LBB151_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB151_4 +; XTENSA-NEXT: .LBB151_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB151_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB151_1 +; XTENSA-NEXT: .LBB151_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB151_2 +; XTENSA-ATOMIC-NEXT: .LBB151_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB151_6 +; XTENSA-ATOMIC-NEXT: .LBB151_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB151_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB151_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB151_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB151_1 +; XTENSA-ATOMIC-NEXT: .LBB151_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI152_0 +; XTENSA-NEXT: j .LBB152_2 +; XTENSA-NEXT: .LBB152_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB152_4 +; XTENSA-NEXT: .LBB152_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB152_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; 
XTENSA-NEXT: j .LBB152_1 +; XTENSA-NEXT: .LBB152_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB152_2 +; XTENSA-ATOMIC-NEXT: .LBB152_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB152_6 +; XTENSA-ATOMIC-NEXT: .LBB152_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB152_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB152_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB152_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB152_1 +; XTENSA-ATOMIC-NEXT: .LBB152_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI153_0 +; XTENSA-NEXT: j .LBB153_2 +; XTENSA-NEXT: .LBB153_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB153_4 +; XTENSA-NEXT: .LBB153_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB153_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB153_1 +; XTENSA-NEXT: .LBB153_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB153_2 +; XTENSA-ATOMIC-NEXT: .LBB153_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB153_6 +; XTENSA-ATOMIC-NEXT: .LBB153_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB153_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB153_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB153_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB153_1 +; XTENSA-ATOMIC-NEXT: .LBB153_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI154_0 +; XTENSA-NEXT: j .LBB154_2 +; XTENSA-NEXT: .LBB154_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB154_4 +; XTENSA-NEXT: .LBB154_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB154_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB154_1 +; XTENSA-NEXT: .LBB154_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB154_2 +; XTENSA-ATOMIC-NEXT: .LBB154_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB154_6 +; XTENSA-ATOMIC-NEXT: .LBB154_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB154_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB154_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB154_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB154_1 +; XTENSA-ATOMIC-NEXT: .LBB154_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI155_0 +; XTENSA-NEXT: j .LBB155_2 +; XTENSA-NEXT: .LBB155_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-NEXT: 
addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB155_4 +; XTENSA-NEXT: .LBB155_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB155_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB155_1 +; XTENSA-NEXT: .LBB155_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB155_2 +; XTENSA-ATOMIC-NEXT: .LBB155_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB155_6 +; XTENSA-ATOMIC-NEXT: .LBB155_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB155_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB155_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB155_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB155_1 +; XTENSA-ATOMIC-NEXT: .LBB155_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI156_0 +; XTENSA-NEXT: j .LBB156_2 +; XTENSA-NEXT: .LBB156_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB156_4 +; XTENSA-NEXT: .LBB156_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB156_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB156_1 +; XTENSA-NEXT: .LBB156_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB156_2 +; XTENSA-ATOMIC-NEXT: .LBB156_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, 
.LBB156_6 +; XTENSA-ATOMIC-NEXT: .LBB156_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB156_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB156_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB156_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB156_1 +; XTENSA-ATOMIC-NEXT: .LBB156_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI157_0 +; XTENSA-NEXT: j .LBB157_2 +; XTENSA-NEXT: .LBB157_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB157_4 +; XTENSA-NEXT: .LBB157_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB157_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB157_1 +; XTENSA-NEXT: .LBB157_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB157_2 +; XTENSA-ATOMIC-NEXT: .LBB157_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB157_6 +; XTENSA-ATOMIC-NEXT: .LBB157_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB157_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB157_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB157_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB157_1 +; XTENSA-ATOMIC-NEXT: .LBB157_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b release + ret i32 
%res +} + +define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI158_0 +; XTENSA-NEXT: j .LBB158_2 +; XTENSA-NEXT: .LBB158_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB158_4 +; XTENSA-NEXT: .LBB158_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB158_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB158_1 +; XTENSA-NEXT: .LBB158_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB158_2 +; XTENSA-ATOMIC-NEXT: .LBB158_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB158_6 +; XTENSA-ATOMIC-NEXT: .LBB158_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB158_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB158_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB158_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB158_1 +; XTENSA-ATOMIC-NEXT: .LBB158_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI159_0 +; XTENSA-NEXT: j .LBB159_2 +; XTENSA-NEXT: .LBB159_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB159_4 +; XTENSA-NEXT: .LBB159_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB159_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j 
.LBB159_1 +; XTENSA-NEXT: .LBB159_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB159_2 +; XTENSA-ATOMIC-NEXT: .LBB159_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB159_6 +; XTENSA-ATOMIC-NEXT: .LBB159_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB159_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB159_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB159_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB159_1 +; XTENSA-ATOMIC-NEXT: .LBB159_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b seq_cst + ret i32 %res +} diff --git a/llvm/test/CodeGen/Xtensa/forced-atomics.ll b/llvm/test/CodeGen/Xtensa/forced-atomics.ll new file mode 100644 index 0000000..eeec87b --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/forced-atomics.ll @@ -0,0 +1,1426 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i -mattr=+forced-atomics < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @load8(ptr %p) nounwind { +; XTENSA-LABEL: load8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i8, ptr %p seq_cst, align 1 + ret i8 %v +} + +define void @store8(ptr %p) nounwind { +; XTENSA-LABEL: store8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s8i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 0, ptr %p seq_cst, align 1 + ret void +} + +define i8 @rmw8(ptr %p) nounwind { +; XTENSA-LABEL: rmw8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw 
+; +; XTENSA-ATOMIC-LABEL: rmw8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: j .LBB2_2 +; XTENSA-ATOMIC-NEXT: .LBB2_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB2_4 +; XTENSA-ATOMIC-NEXT: .LBB2_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: add a6, a15, a10 +; XTENSA-ATOMIC-NEXT: and a6, a6, a11 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: or a6, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB2_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB2_1 +; XTENSA-ATOMIC-NEXT: .LBB2_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i8 1 seq_cst, align 1 + ret i8 %v +} + +define i8 @cmpxchg8(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a8, 0 +; XTENSA-NEXT: s8i a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: movi a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a10, 0 +; XTENSA-ATOMIC-NEXT: and a7, a11, a9 +; XTENSA-ATOMIC-NEXT: movi a11, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a12, a11 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: .LBB3_1: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: or a14, a15, a12 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a11, a11 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB3_3 +; XTENSA-ATOMIC-NEXT: # %bb.2: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: .LBB3_3: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: bnez a7, .LBB3_5 +; 
XTENSA-ATOMIC-NEXT: # %bb.4: # %partword.cmpxchg.failure +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: bne a15, a7, .LBB3_1 +; XTENSA-ATOMIC-NEXT: .LBB3_5: # %partword.cmpxchg.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i8 0, i8 1 seq_cst seq_cst + %res.0 = extractvalue { i8, i1 } %res, 0 + ret i8 %res.0 +} + +define i16 @load16(ptr %p) nounwind { +; XTENSA-LABEL: load16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i16, ptr %p seq_cst, align 2 + ret i16 %v +} + +define void @store16(ptr %p) nounwind { +; XTENSA-LABEL: store16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s16i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 0, ptr %p seq_cst, align 2 + ret void +} + +define i16 @rmw16(ptr %p) nounwind { +; XTENSA-LABEL: rmw16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI6_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: j .LBB6_2 +; XTENSA-ATOMIC-NEXT: .LBB6_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB6_4 +; XTENSA-ATOMIC-NEXT: .LBB6_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: add a6, a15, a10 +; XTENSA-ATOMIC-NEXT: and a6, a6, a11 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: or a6, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB6_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB6_1 +; XTENSA-ATOMIC-NEXT: .LBB6_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, 
a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i16 1 seq_cst, align 2 + ret i16 %v +} + +define i16 @cmpxchg16(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a8, 0 +; XTENSA-NEXT: s16i a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: movi a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI7_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a10, 0 +; XTENSA-ATOMIC-NEXT: and a7, a11, a9 +; XTENSA-ATOMIC-NEXT: movi a11, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a12, a11 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: .LBB7_1: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: or a14, a15, a12 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a11, a11 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB7_3 +; XTENSA-ATOMIC-NEXT: # %bb.2: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: .LBB7_3: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: bnez a7, .LBB7_5 +; XTENSA-ATOMIC-NEXT: # %bb.4: # %partword.cmpxchg.failure +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: bne a15, a7, .LBB7_1 +; XTENSA-ATOMIC-NEXT: .LBB7_5: # %partword.cmpxchg.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i16 0, i16 1 seq_cst seq_cst + %res.0 = extractvalue { i16, i1 } %res, 0 + ret i16 %res.0 +} + +define i32 @load32_unordered(ptr %p) nounwind { +; XTENSA-LABEL: load32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p unordered, align 4 + ret i32 %v +} + +define i32 @load32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: load32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p monotonic, align 4 + ret i32 %v +} + +define i32 
@load32_acquire(ptr %p) nounwind {
+; XTENSA-LABEL: load32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 2
+; XTENSA-NEXT: l32r a8, .LCPI10_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: load32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %v = load atomic i32, ptr %p acquire, align 4
+ ret i32 %v
+}
+
+define i32 @load32_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: load32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 5
+; XTENSA-NEXT: l32r a8, .LCPI11_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: load32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %v = load atomic i32, ptr %p seq_cst, align 4
+ ret i32 %v
+}
+
+define void @store32_unordered(ptr %p) nounwind {
+; XTENSA-LABEL: store32_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI12_0
+; XTENSA-NEXT: or a12, a11, a11
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store32_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 0, ptr %p unordered, align 4
+ ret void
+}
+
+define void @store32_monotonic(ptr %p) nounwind {
+; XTENSA-LABEL: store32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI13_0
+; XTENSA-NEXT: or a12, a11, a11
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 0, ptr %p monotonic, align 4
+ ret void
+}
+
+define void @store32_release(ptr %p) nounwind {
+; XTENSA-LABEL: store32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI14_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 0, ptr %p release, align 4
+ ret void
+}
+
+define void @store32_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: store32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI15_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 0, ptr %p seq_cst, align 4
+ ret void
+}
+
+define i32 @rmw32_add_monotonic(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_add_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI16_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_add_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB16_2
+; XTENSA-ATOMIC-NEXT: .LBB16_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB16_4
+; XTENSA-ATOMIC-NEXT: .LBB16_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: addi a8, a11, 1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB16_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB16_1
+; XTENSA-ATOMIC-NEXT: .LBB16_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw add ptr %p, i32 1 monotonic, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_add_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI17_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_add_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB17_2
+; XTENSA-ATOMIC-NEXT: .LBB17_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB17_4
+; XTENSA-ATOMIC-NEXT: .LBB17_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: addi a8, a11, 1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB17_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB17_1
+; XTENSA-ATOMIC-NEXT: .LBB17_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw add ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_sub_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI18_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_sub_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB18_2
+; XTENSA-ATOMIC-NEXT: .LBB18_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB18_4
+; XTENSA-ATOMIC-NEXT: .LBB18_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: addi a8, a11, -1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB18_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB18_1
+; XTENSA-ATOMIC-NEXT: .LBB18_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw sub ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_and_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI19_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_and_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB19_2
+; XTENSA-ATOMIC-NEXT: .LBB19_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB19_4
+; XTENSA-ATOMIC-NEXT: .LBB19_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a8, a11, a9
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB19_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB19_1
+; XTENSA-ATOMIC-NEXT: .LBB19_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw and ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_nand_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI20_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_nand_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a13, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, -1
+; XTENSA-ATOMIC-NEXT: movi a10, -2
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB20_2
+; XTENSA-ATOMIC-NEXT: .LBB20_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a14, 1, .LBB20_4
+; XTENSA-ATOMIC-NEXT: .LBB20_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a13, a9
+; XTENSA-ATOMIC-NEXT: or a8, a8, a10
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a14, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a8, a13, .LBB20_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB20_1
+; XTENSA-ATOMIC-NEXT: .LBB20_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw nand ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_or_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI21_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_or_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB21_2
+; XTENSA-ATOMIC-NEXT: .LBB21_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB21_4
+; XTENSA-ATOMIC-NEXT: .LBB21_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a9
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB21_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB21_1
+; XTENSA-ATOMIC-NEXT: .LBB21_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw or ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_xor_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI22_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_xor_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB22_2
+; XTENSA-ATOMIC-NEXT: .LBB22_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB22_4
+; XTENSA-ATOMIC-NEXT: .LBB22_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a11, a9
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB22_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB22_1
+; XTENSA-ATOMIC-NEXT: .LBB22_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw xor ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_max_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a5, 1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI23_0
+; XTENSA-NEXT: j .LBB23_2
+; XTENSA-NEXT: .LBB23_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB23_4
+; XTENSA-NEXT: .LBB23_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a5, a5
+; XTENSA-NEXT: bge a5, a2, .LBB23_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB23_1
+; XTENSA-NEXT: .LBB23_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_max_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB23_2
+; XTENSA-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB23_6
+; XTENSA-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: bge a9, a11, .LBB23_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB23_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB23_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB23_1
+; XTENSA-ATOMIC-NEXT: .LBB23_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw max ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_min_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a12, a2, 0
+; XTENSA-NEXT: movi a6, 1
+; XTENSA-NEXT: movi a5, 2
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI24_0
+; XTENSA-NEXT: j .LBB24_2
+; XTENSA-NEXT: .LBB24_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a12, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB24_4
+; XTENSA-NEXT: .LBB24_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a12, a1, 0
+; XTENSA-NEXT: blt a12, a5, .LBB24_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-NEXT: or a12, a6, a6
+; XTENSA-NEXT: j .LBB24_1
+; XTENSA-NEXT: .LBB24_4: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a12, a12
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_min_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a12, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 2
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: or a8, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB24_2
+; XTENSA-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a13, 1, .LBB24_6
+; XTENSA-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: blt a12, a10, .LBB24_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: .LBB24_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a12, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a13, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a12, .LBB24_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB24_1
+; XTENSA-ATOMIC-NEXT: .LBB24_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw min ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_umax_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a5, 1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI25_0
+; XTENSA-NEXT: j .LBB25_2
+; XTENSA-NEXT: .LBB25_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB25_4
+; XTENSA-NEXT: .LBB25_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a5, a5
+; XTENSA-NEXT: bgeu a5, a2, .LBB25_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB25_1
+; XTENSA-NEXT: .LBB25_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_umax_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB25_2
+; XTENSA-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB25_6
+; XTENSA-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: bgeu a9, a11, .LBB25_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB25_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB25_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB25_1
+; XTENSA-ATOMIC-NEXT: .LBB25_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw umax ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_umin_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a12, a2, 0
+; XTENSA-NEXT: movi a6, 1
+; XTENSA-NEXT: movi a5, 2
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI26_0
+; XTENSA-NEXT: j .LBB26_2
+; XTENSA-NEXT: .LBB26_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a12, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB26_4
+; XTENSA-NEXT: .LBB26_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a12, a1, 0
+; XTENSA-NEXT: bltu a12, a5, .LBB26_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-NEXT: or a12, a6, a6
+; XTENSA-NEXT: j .LBB26_1
+; XTENSA-NEXT: .LBB26_4: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a12, a12
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_umin_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a12, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 2
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: or a8, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB26_2
+; XTENSA-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a13, 1, .LBB26_6
+; XTENSA-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: bltu a12, a10, .LBB26_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: .LBB26_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a12, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a13, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a12, .LBB26_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB26_1
+; XTENSA-ATOMIC-NEXT: .LBB26_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw umin ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_xchg_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI27_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_xchg_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB27_2
+; XTENSA-ATOMIC-NEXT: .LBB27_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB27_4
+; XTENSA-ATOMIC-NEXT: .LBB27_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB27_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB27_1
+; XTENSA-ATOMIC-NEXT: .LBB27_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw xchg ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_fadd_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a10, a2, 0
+; XTENSA-NEXT: l32r a6, .LCPI28_1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI28_2
+; XTENSA-NEXT: .LBB28_1: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a10, a1, 0
+; XTENSA-NEXT: l32r a11, .LCPI28_0
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: or a12, a10, a10
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: l32i a10, a1, 0
+; XTENSA-NEXT: beqz a8, .LBB28_1
+; XTENSA-NEXT: # %bb.2: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_fadd_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0
+; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI28_1
+; XTENSA-ATOMIC-NEXT: movi a5, 0
+; XTENSA-ATOMIC-NEXT: movi a4, 1
+; XTENSA-ATOMIC-NEXT: j .LBB28_2
+; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a10, a10
+; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB28_4
+; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI28_0
+; XTENSA-ATOMIC-NEXT: or a10, a7, a7
+; XTENSA-ATOMIC-NEXT: callx8 a6
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0
+; XTENSA-ATOMIC-NEXT: or a8, a4, a4
+; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB28_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a5, a5
+; XTENSA-ATOMIC-NEXT: j .LBB28_1
+; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a10, a10
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw fadd ptr %p, float 1.0 seq_cst, align 4
+ ret float %v
+}
+
+define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_fsub_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a10, a2, 0
+; XTENSA-NEXT: l32r a6, .LCPI29_1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI29_2
+; XTENSA-NEXT: .LBB29_1: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a10, a1, 0
+; XTENSA-NEXT: l32r a11, .LCPI29_0
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: or a12, a10, a10
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: l32i a10, a1, 0
+; XTENSA-NEXT: beqz a8, .LBB29_1
+; XTENSA-NEXT: # %bb.2: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_fsub_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0
+; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI29_1
+; XTENSA-ATOMIC-NEXT: movi a5, 0
+; XTENSA-ATOMIC-NEXT: movi a4, 1
+; XTENSA-ATOMIC-NEXT: j .LBB29_2
+; XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a10, a10
+; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB29_4
+; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI29_0
+; XTENSA-ATOMIC-NEXT: or a10, a7, a7
+; XTENSA-ATOMIC-NEXT: callx8 a6
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0
+; XTENSA-ATOMIC-NEXT: or a8, a4, a4
+; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB29_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a5, a5
+; XTENSA-ATOMIC-NEXT: j .LBB29_1
+; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a10, a10
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw fsub ptr %p, float 1.0 seq_cst, align 4
+ ret float %v
+}
+
+define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_fmin_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a10, a2, 0
+; XTENSA-NEXT: l32r a6, .LCPI30_1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI30_2
+; XTENSA-NEXT: .LBB30_1: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a10, a1, 0
+; XTENSA-NEXT: l32r a11, .LCPI30_0
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: or a12, a10, a10
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: l32i a10, a1, 0
+; XTENSA-NEXT: beqz a8, .LBB30_1
+; XTENSA-NEXT: # %bb.2: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_fmin_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0
+; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI30_1
+; XTENSA-ATOMIC-NEXT: movi a5, 0
+; XTENSA-ATOMIC-NEXT: movi a4, 1
+; XTENSA-ATOMIC-NEXT: j .LBB30_2
+; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a10, a10
+; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB30_4
+; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI30_0
+; XTENSA-ATOMIC-NEXT: or a10, a7, a7
+; XTENSA-ATOMIC-NEXT: callx8 a6
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0
+; XTENSA-ATOMIC-NEXT: or a8, a4, a4
+; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB30_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a5, a5
+; XTENSA-ATOMIC-NEXT: j .LBB30_1
+; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a10, a10
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw fmin ptr %p, float 1.0 seq_cst, align 4
+ ret float %v
+}
+
+define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_fmax_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a10, a2, 0
+; XTENSA-NEXT: l32r a6, .LCPI31_1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI31_2
+; XTENSA-NEXT: .LBB31_1: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a10, a1, 0
+; XTENSA-NEXT: l32r a11, .LCPI31_0
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: or a12, a10, a10
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: l32i a10, a1, 0
+; XTENSA-NEXT: beqz a8, .LBB31_1
+; XTENSA-NEXT: # %bb.2: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_fmax_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0
+; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI31_1
+; XTENSA-ATOMIC-NEXT: movi a5, 0
+; XTENSA-ATOMIC-NEXT: movi a4, 1
+; XTENSA-ATOMIC-NEXT: j .LBB31_2
+; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a10, a10
+; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB31_4
+; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI31_0
+; XTENSA-ATOMIC-NEXT: or a10, a7, a7
+; XTENSA-ATOMIC-NEXT: callx8 a6
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0
+; XTENSA-ATOMIC-NEXT: or a8, a4, a4
+; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB31_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a5, a5
+; XTENSA-ATOMIC-NEXT: j .LBB31_1
+; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a10, a10
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw fmax ptr %p, float 1.0 seq_cst, align 4
+ ret float %v
+}
+
+define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
+; XTENSA-LABEL: cmpxchg32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a13, 0
+; XTENSA-NEXT: s32i a13, a1, 0
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: movi a12, 1
+; XTENSA-NEXT: l32r a8, .LCPI32_0
+; XTENSA-NEXT: or a14, a13, a13
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: cmpxchg32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 1
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: wsr a9, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic
+ %res.0 = extractvalue { i32, i1 } %res, 0
+ ret i32 %res.0
+}
+
+define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: cmpxchg32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a8, 0
+; XTENSA-NEXT: s32i a8, a1, 0
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: movi a12, 1
+; XTENSA-NEXT: movi a13, 5
+; XTENSA-NEXT: l32r a8, .LCPI33_0
+; XTENSA-NEXT: or a14, a13, a13
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: cmpxchg32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: movi a8, 1
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: wsr a9, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = cmpxchg ptr %p, i32 0, i32 1 seq_cst seq_cst
+ %res.0 = extractvalue { i32, i1 } %res, 0
+ ret i32 %res.0
+}