Diffstat (limited to 'llvm/test/CodeGen')
200 files changed, 37534 insertions, 9306 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir index 437a9e6..3f14162 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir @@ -10,7 +10,7 @@ define i32 @va_start(ptr %a, ...) { entry: %ap = alloca %struct.__va_list, align 8 - call void @llvm.lifetime.start.p0(i64 32, ptr %ap) + call void @llvm.lifetime.start.p0(ptr %ap) call void @llvm.va_start.p0(ptr %ap) %vr_offs_p = getelementptr inbounds i8, ptr %ap, i64 28 %vr_offs = load i32, ptr %vr_offs_p, align 4 diff --git a/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll b/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll new file mode 100644 index 0000000..da04c67 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll @@ -0,0 +1,70 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mattr=+sve2 -verify-machineinstrs < %s -o - | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; This test is reduced from a real world example that would cause the DAGCombiner to hang. + +define void @histcnt_loop(ptr %0, i64 %1, ptr %2, i64 %3, i64 %4) { +; CHECK-LABEL: histcnt_loop: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov z0.d, #1 // =0x1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mov x8, xzr +; CHECK-NEXT: add x9, x0, x1 +; CHECK-NEXT: .LBB0_1: // %loop +; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, x8, lsl #1] +; CHECK-NEXT: lsl x10, x8, #1 +; CHECK-NEXT: add x11, x0, x10 +; CHECK-NEXT: add x10, x9, x10 +; CHECK-NEXT: lsl z1.d, z1.d, #1 +; CHECK-NEXT: ld1h { z4.d }, p0/z, [x11, #1, mul vl] +; CHECK-NEXT: ld1h { z5.d }, p0/z, [x10, #1, mul vl] +; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d +; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d] +; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d +; CHECK-NEXT: ld1h { z3.d }, p0/z, [x9, x8, lsl #1] +; CHECK-NEXT: add x8, x8, x3 +; CHECK-NEXT: cmp x4, x8 +; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d] +; CHECK-NEXT: lsl z1.d, z4.d, #1 +; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d +; CHECK-NEXT: ld1h { z4.d }, p0/z, [x2, z1.d] +; CHECK-NEXT: mad z2.d, p0/m, z0.d, z4.d +; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d] +; CHECK-NEXT: lsl z1.d, z3.d, #1 +; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d +; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d] +; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d +; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d] +; CHECK-NEXT: lsl z1.d, z5.d, #1 +; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d +; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d] +; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d +; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d] +; CHECK-NEXT: b.ne .LBB0_1 +; CHECK-NEXT: // %bb.2: // %exit +; CHECK-NEXT: ret +entry: + br label %loop + +loop: + %6 = phi i64 [ 0, %entry ], [ %15, %loop ] + %7 = getelementptr inbounds nuw i16, ptr %0, i64 %6 + %8 = getelementptr inbounds nuw i8, ptr %7, i64 %1 + %9 = load <vscale x 4 x i16>, ptr %7, align 2 + %10 = load <vscale x 4 x i16>, ptr %8, align 2 + %11 = zext <vscale x 4 x i16> %9 to <vscale x 4 x i64> + %12 = zext <vscale x 4 x i16> %10 to <vscale x 4 x i64> + %13 = getelementptr inbounds nuw [16 x i16], ptr %2, i64 0, <vscale x 4 x i64> %11 + %14 = getelementptr inbounds nuw [16 x i16], ptr %2, i64 0, <vscale x 4 x i64> %12 + call void @llvm.experimental.vector.histogram.add.nxv4p0.i16(<vscale x 4 x ptr> %13, i16 1, <vscale x 4 x i1> splat (i1 true)) + call void 
@llvm.experimental.vector.histogram.add.nxv4p0.i16(<vscale x 4 x ptr> %14, i16 1, <vscale x 4 x i1> splat (i1 true)) + %15 = add nuw i64 %6, %3 + %16 = icmp eq i64 %15, %4 + br i1 %16, label %exit, label %loop + +exit: + ret void +} diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll index 113eb14..4db9db9 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll @@ -370,3 +370,175 @@ entry: %r = select i1 %c, i64 %a, i64 %ands ret i64 %r } + +; Test EOR. +define i32 @test1_eor(i32 %a) { +; CHECK-LABEL: test1_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: eor w8, w0, #0x400 +; CHECK-NEXT: eor w0, w8, #0x200000 +; CHECK-NEXT: ret +entry: + %eor = xor i32 %a, 2098176 + ret i32 %eor +} + +; This constant should not be split because it can be handled by one mov. +define i32 @test2_eor(i32 %a) { +; CHECK-LABEL: test2_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: eor w0, w0, w8 +; CHECK-NEXT: ret +entry: + %eor = xor i32 %a, 135 + ret i32 %eor +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. +define i32 @test3_eor(i32 %a) { +; CHECK-LABEL: test3_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: eor w0, w0, w8 +; CHECK-NEXT: ret +entry: + %eor = xor i32 %a, 2163712 + ret i32 %eor +} + +define i64 @test4_eor(i64 %a) { +; CHECK-LABEL: test4_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: eor x8, x0, #0x400 +; CHECK-NEXT: eor x0, x8, #0x200000 +; CHECK-NEXT: ret +entry: + %eor = xor i64 %a, 2098176 + ret i64 %eor +} + +define i64 @test5_eor(i64 %a) { +; CHECK-LABEL: test5_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: eor x8, x0, #0x4000 +; CHECK-NEXT: eor x0, x8, #0x200000000 +; CHECK-NEXT: ret +entry: + %eor = xor i64 %a, 8589950976 + ret i64 %eor +} + +; This constant should not be split because it can be handled by one mov. +define i64 @test6_eor(i64 %a) { +; CHECK-LABEL: test6_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: eor x0, x0, x8 +; CHECK-NEXT: ret +entry: + %eor = xor i64 %a, 135 + ret i64 %eor +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. +define i64 @test7_eor(i64 %a) { +; CHECK-LABEL: test7_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: eor x0, x0, x8 +; CHECK-NEXT: ret +entry: + %eor = xor i64 %a, 2163712 + ret i64 %eor +} + +; Test ORR. +define i32 @test1_orr(i32 %a) { +; CHECK-LABEL: test1_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: orr w8, w0, #0x400 +; CHECK-NEXT: orr w0, w8, #0x200000 +; CHECK-NEXT: ret +entry: + %orr = or i32 %a, 2098176 + ret i32 %orr +} + +; This constant should not be split because it can be handled by one mov. +define i32 @test2_orr(i32 %a) { +; CHECK-LABEL: test2_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: orr w0, w0, w8 +; CHECK-NEXT: ret +entry: + %orr = or i32 %a, 135 + ret i32 %orr +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. 
+define i32 @test3_orr(i32 %a) { +; CHECK-LABEL: test3_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: orr w0, w0, w8 +; CHECK-NEXT: ret +entry: + %orr = or i32 %a, 2163712 + ret i32 %orr +} + +define i64 @test4_orr(i64 %a) { +; CHECK-LABEL: test4_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: orr x8, x0, #0x400 +; CHECK-NEXT: orr x0, x8, #0x200000 +; CHECK-NEXT: ret +entry: + %orr = or i64 %a, 2098176 + ret i64 %orr +} + +define i64 @test5_orr(i64 %a) { +; CHECK-LABEL: test5_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: orr x8, x0, #0x4000 +; CHECK-NEXT: orr x0, x8, #0x200000000 +; CHECK-NEXT: ret +entry: + %orr = or i64 %a, 8589950976 + ret i64 %orr +} + +; This constant should not be split because it can be handled by one mov. +define i64 @test6_orr(i64 %a) { +; CHECK-LABEL: test6_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: orr x0, x0, x8 +; CHECK-NEXT: ret +entry: + %orr = or i64 %a, 135 + ret i64 %orr +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. +define i64 @test7_orr(i64 %a) { +; CHECK-LABEL: test7_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: orr x0, x0, x8 +; CHECK-NEXT: ret +entry: + %orr = or i64 %a, 2163712 + ret i64 %orr +} diff --git a/llvm/test/CodeGen/AArch64/abd-combine.ll b/llvm/test/CodeGen/AArch64/abd-combine.ll index d025789..cdb40ce 100644 --- a/llvm/test/CodeGen/AArch64/abd-combine.ll +++ b/llvm/test/CodeGen/AArch64/abd-combine.ll @@ -17,12 +17,9 @@ define <8 x i16> @abdu_base(<8 x i16> %src1, <8 x i16> %src2) { define <8 x i16> @abdu_const(<8 x i16> %src1) { ; CHECK-LABEL: abdu_const: ; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #1 -; CHECK-NEXT: ushll v2.4s, v0.4h, #0 -; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: uabd v1.4s, v2.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: movi v1.4h, #1 +; CHECK-NEXT: mov v1.d[1], v1.d[0] +; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %zextsrc1 = zext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> @@ -34,12 +31,9 @@ define <8 x i16> @abdu_const(<8 x i16> %src1) { define <8 x i16> @abdu_const_lhs(<8 x i16> %src1) { ; CHECK-LABEL: abdu_const_lhs: ; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #1 -; CHECK-NEXT: ushll v2.4s, v0.4h, #0 -; CHECK-NEXT: ushll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: uabd v1.4s, v2.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: movi v1.4h, #1 +; CHECK-NEXT: mov v1.d[1], v1.d[0] +; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %zextsrc1 = zext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %zextsrc1 @@ -318,12 +312,9 @@ define <8 x i16> @abds_base(<8 x i16> %src1, <8 x i16> %src2) { define <8 x i16> @abds_const(<8 x i16> %src1) { ; CHECK-LABEL: abds_const: ; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #1 -; CHECK-NEXT: sshll v2.4s, v0.4h, #0 -; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: sabd v1.4s, v2.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: movi v1.4h, #1 +; CHECK-NEXT: mov v1.d[1], v1.d[0] +; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %zextsrc1 = sext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x 
i32> %zextsrc1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> @@ -335,12 +326,9 @@ define <8 x i16> @abds_const(<8 x i16> %src1) { define <8 x i16> @abds_const_lhs(<8 x i16> %src1) { ; CHECK-LABEL: abds_const_lhs: ; CHECK: // %bb.0: -; CHECK-NEXT: movi v1.4s, #1 -; CHECK-NEXT: sshll v2.4s, v0.4h, #0 -; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s -; CHECK-NEXT: sabd v1.4s, v2.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: movi v1.4h, #1 +; CHECK-NEXT: mov v1.d[1], v1.d[0] +; CHECK-NEXT: sabd v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %zextsrc1 = sext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %zextsrc1 @@ -352,11 +340,10 @@ define <8 x i16> @abds_const_lhs(<8 x i16> %src1) { define <8 x i16> @abds_const_zero(<8 x i16> %src1) { ; CHECK-LABEL: abds_const_zero: ; CHECK: // %bb.0: -; CHECK-NEXT: sshll v1.4s, v0.4h, #0 -; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0 -; CHECK-NEXT: abs v0.4s, v0.4s -; CHECK-NEXT: abs v1.4s, v1.4s -; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 +; CHECK-NEXT: abs v0.4h, v0.4h +; CHECK-NEXT: abs v1.4h, v1.4h +; CHECK-NEXT: mov v0.d[1], v1.d[0] ; CHECK-NEXT: ret %zextsrc1 = sext <8 x i16> %src1 to <8 x i32> %sub = sub <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, %zextsrc1 diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll index 7524782..02c76ba 100644 --- a/llvm/test/CodeGen/AArch64/abds-neg.ll +++ b/llvm/test/CodeGen/AArch64/abds-neg.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -26,8 +25,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -43,8 +41,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -60,8 +57,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -93,8 +89,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i16 %a to i64 diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll index bbdb116..bf52e71 100644 --- a/llvm/test/CodeGen/AArch64/abds.ll +++ b/llvm/test/CodeGen/AArch64/abds.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb 
-; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -25,8 +24,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -41,8 +39,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -57,8 +54,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -88,8 +84,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -215,8 +210,7 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_minmax_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i8 @llvm.smin.i8(i8 %a, i8 %b) @@ -229,8 +223,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_minmax_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i16 @llvm.smin.i16(i16 %a, i16 %b) @@ -287,8 +280,7 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_cmp_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sgt i8 %a, %b @@ -302,8 +294,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_cmp_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sge i16 %a, %b @@ -508,9 +499,8 @@ define i64 @vector_legalized(i16 %a, i16 %b) { ; CHECK: // %bb.0: ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: addp d0, v0.2d -; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: cneg w8, w8, mi ; CHECK-NEXT: fmov x9, d0 ; CHECK-NEXT: add x0, x9, x8 @@ -533,8 +523,7 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_select_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp slt i8 %a, %b @@ -548,8 +537,7 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_select_i16: ; CHECK: // %bb.0: ; 
CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sle i16 %a, %b diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll index d07f099a..400031b 100644 --- a/llvm/test/CodeGen/AArch64/abdu-neg.ll +++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -26,8 +25,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -43,8 +41,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -60,8 +57,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i16 %a to i64 @@ -93,8 +89,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i16 %a to i64 diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll index 1045ee2..8d2b0b0 100644 --- a/llvm/test/CodeGen/AArch64/abdu.ll +++ b/llvm/test/CodeGen/AArch64/abdu.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -25,8 +24,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -41,8 +39,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -57,8 +54,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i16 %a to i64 @@ -88,8 +84,7 @@ define i16 
@abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i16 %a to i64 @@ -219,8 +214,7 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_minmax_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i8 @llvm.umin.i8(i8 %a, i8 %b) @@ -233,8 +227,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_minmax_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i16 @llvm.umin.i16(i16 %a, i16 %b) @@ -293,8 +286,7 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_cmp_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ugt i8 %a, %b @@ -308,8 +300,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_cmp_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp uge i16 %a, %b @@ -373,10 +364,9 @@ define i64 @vector_legalized(i16 %a, i16 %b) { ; CHECK: // %bb.0: ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: addp d0, v0.2d +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w8, w8, mi +; CHECK-NEXT: addp d0, v0.2d ; CHECK-NEXT: fmov x9, d0 ; CHECK-NEXT: add x0, x9, x8 ; CHECK-NEXT: ret @@ -398,8 +388,7 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_select_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ult i8 %a, %b @@ -413,8 +402,7 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_select_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ule i16 %a, %b diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll index 3a808f5..dd018a6 100644 --- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll +++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll @@ -11,7 +11,7 @@ define void @array_1D(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] @@ -34,7 +34,7 @@ define %my_subtype @array_1D_extract(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0, #1, mul vl] ; CHECK-NEXT: addvl sp, sp, #3 @@ -52,7 +52,7 @@ define void @array_1D_insert(ptr %addr, %my_subtype %elt) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] ; CHECK-NEXT: ldr z2, [x0] @@ -75,7 +75,7 @@ define void @array_2D(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-6 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x30, 0x1e, 0x22 // sp + 16 + 48 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #5, mul vl] diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll index e7d8f4f..be73dc9 100644 --- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll +++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll @@ -10,7 +10,7 @@ define void @test(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/arm64-ext.ll b/llvm/test/CodeGen/AArch64/arm64-ext.ll index 8bf2b82..c367057 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ext.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ext.ll @@ -139,9 +139,8 @@ define <2 x ptr> @test_v2p0(<2 x ptr> %a, <2 x ptr> %b) { define <16 x i8> @reverse_vector_s8x16b(<16 x i8> noundef %x) { ; CHECK-SD-LABEL: reverse_vector_s8x16b: ; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: rev64 v1.16b, v0.16b -; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8 -; CHECK-SD-NEXT: mov v0.d[1], v1.d[0] +; CHECK-SD-NEXT: rev64 v0.16b, v0.16b +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-SD-NEXT: ret ; ; CHECK-GI-LABEL: reverse_vector_s8x16b: @@ -161,9 +160,8 @@ entry: define <8 x i16> @reverse_vector_s16x8b(<8 x i16> noundef %x) { ; CHECK-SD-LABEL: reverse_vector_s16x8b: ; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: rev64 v1.8h, v0.8h -; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8 -; CHECK-SD-NEXT: mov v0.d[1], v1.d[0] +; CHECK-SD-NEXT: rev64 v0.8h, v0.8h +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-SD-NEXT: ret ; ; CHECK-GI-LABEL: reverse_vector_s16x8b: diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll index 256ff94..9a1b6a0 100644 --- a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll +++ b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll @@ -70,6 +70,23 @@ entry: ret <2 x i64> %add.i } +define void @test_commutable_vaddl_s8(<8 x i8> %a, <8 x i8> %b, ptr %c) { +; CHECK-LABEL: test_commutable_vaddl_s8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: saddl v0.8h, v0.8b, v1.8b +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret +entry: + %vmovl.i.i = sext <8 x i8> %a to <8 x i16> + %vmovl.i2.i = sext <8 x i8> %b to <8 x i16> + %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i + store <8 x i16> %add.i, ptr %c + %add.i2 = add <8 x i16> %vmovl.i2.i, %vmovl.i.i + %c.gep.1 = getelementptr i8, ptr %c, i64 16 + store <8 x i16> %add.i2, ptr %c.gep.1 + ret void +} + define <8 x i16> @test_vaddl_u8(<8 x i8> %a, <8 x i8> %b) { ; CHECK-LABEL: test_vaddl_u8: ; CHECK: // %bb.0: // %entry @@ -106,6 +123,23 @@ entry: ret <2 x i64> %add.i } +define void @test_commutable_vaddl_u8(<8 x i8> %a, <8 x i8> %b, ptr %c) { +; CHECK-LABEL: test_commutable_vaddl_u8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: uaddl v0.8h, v0.8b, v1.8b +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret +entry: + %vmovl.i.i = zext <8 x i8> %a to <8 x i16> + %vmovl.i2.i = zext <8 x i8> %b to <8 x i16> + %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i + store <8 x i16> %add.i, ptr %c + %add.i2 = add <8 x i16> %vmovl.i2.i, %vmovl.i.i + %c.gep.1 = getelementptr i8, ptr %c, i64 16 + store <8 x i16> %add.i2, ptr %c.gep.1 + ret void +} + define <8 x i16> @test_vaddl_a8(<8 x i8> %a, <8 x i8> %b) { ; CHECK-SD-LABEL: test_vaddl_a8: ; CHECK-SD: // %bb.0: // %entry @@ -2892,9 +2926,9 @@ define <8 x i16> @cmplx_mul_combined_re_im(<8 x i16> noundef %a, i64 %scale.coer ; CHECK-GI-LABEL: cmplx_mul_combined_re_im: ; CHECK-GI: // %bb.0: // %entry ; CHECK-GI-NEXT: lsr x9, x0, #16 -; CHECK-GI-NEXT: adrp x8, .LCPI196_0 +; CHECK-GI-NEXT: adrp x8, 
.LCPI198_0 ; CHECK-GI-NEXT: rev32 v4.8h, v0.8h -; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI196_0] +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI198_0] ; CHECK-GI-NEXT: fmov d1, x9 ; CHECK-GI-NEXT: dup v2.8h, v1.h[0] ; CHECK-GI-NEXT: sqneg v1.8h, v2.8h diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll b/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll index 6c7ddd9..ccd1917 100644 --- a/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll +++ b/llvm/test/CodeGen/AArch64/arm64-neon-aba-abd.ll @@ -575,3 +575,69 @@ define <4 x i32> @knownbits_sabd_and_mul_mask(<4 x i32> %a0, <4 x i32> %a1) { %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3> ret <4 x i32> %6 } + +define <4 x i16> @trunc_abdu_foldable(<4 x i16> %a, <4 x i16> %b) { +; CHECK-SD-LABEL: trunc_abdu_foldable: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: uabd v0.4h, v0.4h, v1.4h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: trunc_abdu_foldable: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: uabd v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: ret + %ext_a = zext <4 x i16> %a to <4 x i32> + %ext_b = zext <4 x i16> %b to <4 x i32> + %abd = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %ext_a, <4 x i32> %ext_b) + %trunc = trunc <4 x i32> %abd to <4 x i16> + ret <4 x i16> %trunc +} + +define <4 x i16> @trunc_abds_foldable(<4 x i16> %a, <4 x i16> %b) { +; CHECK-SD-LABEL: trunc_abds_foldable: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sabd v0.4h, v0.4h, v1.4h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: trunc_abds_foldable: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: sabd v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-GI-NEXT: ret + %a32 = sext <4 x i16> %a to <4 x i32> + %b32 = sext <4 x i16> %b to <4 x i32> + %abd32 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a32, <4 x i32> %b32) + %res16 = trunc <4 x i32> %abd32 to <4 x i16> + ret <4 x i16> %res16 +} + +define <4 x i16> @trunc_abdu_not_foldable(<4 x i16> %a, <4 x i32> %b) { +; CHECK-LABEL: trunc_abdu_not_foldable: +; CHECK: // %bb.0: +; CHECK-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-NEXT: uabd v0.4s, v0.4s, v1.4s +; CHECK-NEXT: xtn v0.4h, v0.4s +; CHECK-NEXT: ret + %ext_a = zext <4 x i16> %a to <4 x i32> + %abd = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %ext_a, <4 x i32> %b) + %trunc = trunc <4 x i32> %abd to <4 x i16> + ret <4 x i16> %trunc +} + +define <4 x i16> @truncate_abds_testcase1(<4 x i16> %a, <4 x i32> %b) { +; CHECK-LABEL: truncate_abds_testcase1: +; CHECK: // %bb.0: +; CHECK-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-NEXT: sabd v0.4s, v0.4s, v1.4s +; CHECK-NEXT: xtn v0.4h, v0.4s +; CHECK-NEXT: ret + %a32 = sext <4 x i16> %a to <4 x i32> + %abd32 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a32, <4 x i32> %b) + %res16 = trunc <4 x i32> %abd32 to <4 x i16> + ret <4 x i16> %res16 +} diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll index ecf3f69..0d427c0 100644 --- a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll +++ b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll @@ -1608,6 +1608,18 @@ define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) { ret <16 x i8> %prod } +define <16 x i8> @commutable_poly_mul(<16 x i8> %lhs, <16 x i8> %rhs) { +; CHECK-LABEL: commutable_poly_mul: +; CHECK: // %bb.0: +; CHECK-NEXT: pmul v0.16b, v0.16b, v1.16b +; 
CHECK-NEXT: add v0.16b, v0.16b, v0.16b +; CHECK-NEXT: ret + %1 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %lhs, <16 x i8> %rhs) + %2 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %rhs, <16 x i8> %lhs) + %3 = add <16 x i8> %1, %2 + ret <16 x i8> %3 +} + declare <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>) declare <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>) declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>) diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll index 78881c8..ede5a7c 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll @@ -44,6 +44,35 @@ define <2 x i64> @sabdl2d(ptr %A, ptr %B) nounwind { ret <2 x i64> %tmp4 } +define void @commutable_sabdl(ptr %A, ptr %B, ptr %C) nounwind { +; CHECK-SD-LABEL: commutable_sabdl: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d0, [x0] +; CHECK-SD-NEXT: ldr d1, [x1] +; CHECK-SD-NEXT: sabdl.8h v0, v1, v0 +; CHECK-SD-NEXT: str q0, [x2] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commutable_sabdl: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr d0, [x0] +; CHECK-GI-NEXT: ldr d1, [x1] +; CHECK-GI-NEXT: sabdl.8h v0, v0, v1 +; CHECK-GI-NEXT: str q0, [x2] +; CHECK-GI-NEXT: str q0, [x2] +; CHECK-GI-NEXT: ret + %tmp1 = load <8 x i8>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) + %tmp4 = zext <8 x i8> %tmp3 to <8 x i16> + store <8 x i16> %tmp4, ptr %C + %tmp5 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp1) + %tmp6 = zext <8 x i8> %tmp5 to <8 x i16> + %tmp7 = getelementptr i8, ptr %C, i64 16 + store <8 x i16> %tmp6, ptr %C + ret void +} + define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind { ; CHECK-SD-LABEL: sabdl2_8h: ; CHECK-SD: // %bb.0: @@ -155,6 +184,35 @@ define <2 x i64> @uabdl2d(ptr %A, ptr %B) nounwind { ret <2 x i64> %tmp4 } +define void @commutable_uabdl(ptr %A, ptr %B, ptr %C) nounwind { +; CHECK-SD-LABEL: commutable_uabdl: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d0, [x0] +; CHECK-SD-NEXT: ldr d1, [x1] +; CHECK-SD-NEXT: uabdl.8h v0, v1, v0 +; CHECK-SD-NEXT: str q0, [x2] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commutable_uabdl: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr d0, [x0] +; CHECK-GI-NEXT: ldr d1, [x1] +; CHECK-GI-NEXT: uabdl.8h v0, v0, v1 +; CHECK-GI-NEXT: str q0, [x2] +; CHECK-GI-NEXT: str q0, [x2] +; CHECK-GI-NEXT: ret + %tmp1 = load <8 x i8>, ptr %A + %tmp2 = load <8 x i8>, ptr %B + %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) + %tmp4 = zext <8 x i8> %tmp3 to <8 x i16> + store <8 x i16> %tmp4, ptr %C + %tmp5 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp1) + %tmp6 = zext <8 x i8> %tmp5 to <8 x i16> + %tmp7 = getelementptr i8, ptr %C, i64 16 + store <8 x i16> %tmp6, ptr %C + ret void +} + define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind { ; CHECK-SD-LABEL: uabdl2_8h: ; CHECK-SD: // %bb.0: diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll index 07400bb..d12f7ce 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll @@ -3,6 +3,7 @@ ; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI ; CHECK-GI: warning: Instruction selection used fallback path for pmull8h +; CHECK-GI-NEXT: warning: Instruction selection used 
fallback path for commutable_pmull8h ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_1s ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_2s ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_4s @@ -78,6 +79,20 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind { ret <2 x i64> %tmp3 } +define void @commutable_smull(<2 x i32> %A, <2 x i32> %B, ptr %C) { +; CHECK-LABEL: commutable_smull: +; CHECK: // %bb.0: +; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %1 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %B) + %2 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %B, <2 x i32> %A) + store <2 x i64> %1, ptr %C + %3 = getelementptr i8, ptr %C, i64 16 + store <2 x i64> %2, ptr %3 + ret void +} + declare <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone @@ -121,6 +136,20 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind { ret <2 x i64> %tmp3 } +define void @commutable_umull(<2 x i32> %A, <2 x i32> %B, ptr %C) { +; CHECK-LABEL: commutable_umull: +; CHECK: // %bb.0: +; CHECK-NEXT: umull v0.2d, v0.2s, v1.2s +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %1 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %B) + %2 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %B, <2 x i32> %A) + store <2 x i64> %1, ptr %C + %3 = getelementptr i8, ptr %C, i64 16 + store <2 x i64> %2, ptr %3 + ret void +} + declare <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone @@ -212,6 +241,20 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind { ret <8 x i16> %tmp3 } +define void @commutable_pmull8h(<8 x i8> %A, <8 x i8> %B, ptr %C) { +; CHECK-LABEL: commutable_pmull8h: +; CHECK: // %bb.0: +; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b +; CHECK-NEXT: stp q0, q0, [x0] +; CHECK-NEXT: ret + %1 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %A, <8 x i8> %B) + %2 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %B, <8 x i8> %A) + store <8 x i16> %1, ptr %C + %3 = getelementptr i8, ptr %C, i8 16 + store <8 x i16> %2, ptr %3 + ret void +} + declare <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind { @@ -487,10 +530,10 @@ define void @smlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, ; CHECK-GI-LABEL: smlal2d_chain_with_constant: ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: mvn v3.8b, v2.8b -; CHECK-GI-NEXT: adrp x8, .LCPI27_0 +; CHECK-GI-NEXT: adrp x8, .LCPI30_0 ; CHECK-GI-NEXT: smull v1.2d, v1.2s, v3.2s ; CHECK-GI-NEXT: smlal v1.2d, v0.2s, v2.2s -; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI27_0] +; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI30_0] ; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d ; CHECK-GI-NEXT: str q0, [x0] ; CHECK-GI-NEXT: ret @@ -566,8 +609,8 @@ define void @smlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, ; ; CHECK-GI-LABEL: smlsl2d_chain_with_constant: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: adrp x8, .LCPI31_0 -; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI31_0] +; CHECK-GI-NEXT: adrp x8, .LCPI34_0 
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI34_0] ; CHECK-GI-NEXT: smlsl v3.2d, v0.2s, v2.2s ; CHECK-GI-NEXT: mvn v0.8b, v2.8b ; CHECK-GI-NEXT: smlsl v3.2d, v1.2s, v0.2s @@ -829,10 +872,10 @@ define void @umlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, ; CHECK-GI-LABEL: umlal2d_chain_with_constant: ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: mvn v3.8b, v2.8b -; CHECK-GI-NEXT: adrp x8, .LCPI43_0 +; CHECK-GI-NEXT: adrp x8, .LCPI46_0 ; CHECK-GI-NEXT: umull v1.2d, v1.2s, v3.2s ; CHECK-GI-NEXT: umlal v1.2d, v0.2s, v2.2s -; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI43_0] +; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI46_0] ; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d ; CHECK-GI-NEXT: str q0, [x0] ; CHECK-GI-NEXT: ret @@ -908,8 +951,8 @@ define void @umlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, ; ; CHECK-GI-LABEL: umlsl2d_chain_with_constant: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: adrp x8, .LCPI47_0 -; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI47_0] +; CHECK-GI-NEXT: adrp x8, .LCPI50_0 +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI50_0] ; CHECK-GI-NEXT: umlsl v3.2d, v0.2s, v2.2s ; CHECK-GI-NEXT: mvn v0.8b, v2.8b ; CHECK-GI-NEXT: umlsl v3.2d, v1.2s, v0.2s @@ -3222,6 +3265,20 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind { ret <16 x i8> %val } +define <16 x i8> @test_commutable_pmull_64(i64 %l, i64 %r) nounwind { +; CHECK-LABEL: test_commutable_pmull_64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, x1 +; CHECK-NEXT: fmov d1, x0 +; CHECK-NEXT: pmull v0.1q, v1.1d, v0.1d +; CHECK-NEXT: add v0.16b, v0.16b, v0.16b +; CHECK-NEXT: ret + %1 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r) + %2 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %r, i64 %l) + %3 = add <16 x i8> %1, %2 + ret <16 x i8> %3 +} + declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64) define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind { diff --git a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir index 23ac67c..805d244 100644 --- a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir +++ b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir @@ -96,3 +96,23 @@ body: | $q25 = ORRv16i8 $q3, killed $q3 RET_ReallyLR implicit $q22 ... +--- +name: DoubleOp +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $q2 + + ; CHECK-LABEL: name: DoubleOp + ; CHECK: liveins: $q2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $q0 = MOVIv8i16 1, 0 + ; CHECK-NEXT: renamable $q1 = ORRv16i8 renamable $q2, renamable $q2 + ; CHECK-NEXT: renamable $q1 = BSLv16i8 killed renamable $q1, renamable $q2, renamable $q0 + ; CHECK-NEXT: renamable $q0 = SQADDv8i16 killed renamable $q1, killed renamable $q0 + ; CHECK-NEXT: RET undef $lr, implicit $q0 + renamable $q0 = MOVIv8i16 1, 0 + renamable $q1 = BSPv16i8 killed renamable $q2, renamable $q2, renamable $q0 + renamable $q0 = SQADDv8i16 killed renamable $q1, killed renamable $q0 + RET_ReallyLR implicit $q0 +... diff --git a/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll b/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll new file mode 100644 index 0000000..5036be9 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll @@ -0,0 +1,112 @@ +; RUN: llc -debug-only=isel -o /dev/null < %s 2>&1 | FileCheck %s + +; REQUIRES: asserts + +; These tests ensure that we don't combine +; CSEL a, b, cc, SUBS(SUB(x,y), 0) -> CSEL a, b, cc, SUBS(x,y) +; if the flags set by SUBS(SUB(x,y), 0) have more than one use. 
+; +; This restriction exists because combining SUBS(SUB(x,y), 0) -> SUBS(x,y) is +; only valid if there are no users of the overflow flags (C/V) generated by the +; SUBS. Currently, we only check the flags used by the CSEL, and therefore we +; conservatively reject cases where the SUBS's flags have other uses. + +target triple = "aarch64-unknown-linux-gnu" + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'combine_subs:' +; CHECK-NEXT: SelectionDAG has 13 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t5: i32 = sub t2, t4 +; CHECK-NEXT: t14: i32,i32 = AArch64ISD::SUBS t5, Constant:i32<0> +; CHECK-NEXT: t16: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t14:1 +; CHECK-NEXT: t11: ch,glue = CopyToReg t0, Register:i32 $w0, t16 +; CHECK-NEXT: t12: ch = AArch64ISD::RET_GLUE t11, Register:i32 $w0, t11:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'combine_subs:' +; CHECK-NEXT: SelectionDAG has 11 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t18: i32,i32 = AArch64ISD::SUBS t2, t4 +; CHECK-NEXT: t16: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t18:1 +; CHECK-NEXT: t11: ch,glue = CopyToReg t0, Register:i32 $w0, t16 +; CHECK-NEXT: t12: ch = AArch64ISD::RET_GLUE t11, Register:i32 $w0, t11:1 + +define i32 @combine_subs(i32 %a, i32 %b) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + ret i32 %sel +} + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'combine_subs_multiple_sub_uses:' +; CHECK-NEXT: SelectionDAG has 14 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t5: i32 = sub t2, t4 +; CHECK-NEXT: t15: i32,i32 = AArch64ISD::SUBS t5, Constant:i32<0> +; CHECK-NEXT: t17: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t15:1 +; CHECK-NEXT: t10: i32 = add t17, t5 +; CHECK-NEXT: t12: ch,glue = CopyToReg t0, Register:i32 $w0, t10 +; CHECK-NEXT: t13: ch = AArch64ISD::RET_GLUE t12, Register:i32 $w0, t12:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'combine_subs_multiple_sub_uses:' +; CHECK-NEXT: SelectionDAG has 12 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t17: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t19:1 +; CHECK-NEXT: t10: i32 = add t17, t19 +; CHECK-NEXT: t12: ch,glue = CopyToReg t0, Register:i32 $w0, t10 +; CHECK-NEXT: t19: i32,i32 = AArch64ISD::SUBS t2, t4 +; CHECK-NEXT: t13: ch = AArch64ISD::RET_GLUE t12, Register:i32 $w0, t12:1 + +define i32 @combine_subs_multiple_sub_uses(i32 %a, i32 %b) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + %add = add i32 %sel, %sub + ret i32 %add +} + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'do_not_combine_subs_multiple_flag_uses:' +; CHECK-NEXT: SelectionDAG has 19 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t24: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t21:1 +; CHECK-NEXT: t6: i32,ch = CopyFromReg t0, Register:i32 %2 +; CHECK-NEXT: t8: i32,ch = 
CopyFromReg t0, Register:i32 %3 +; CHECK-NEXT: t23: i32 = AArch64ISD::CSEL t6, t8, Constant:i32<1>, t21:1 +; CHECK-NEXT: t15: i32 = add t24, t23 +; CHECK-NEXT: t17: ch,glue = CopyToReg t0, Register:i32 $w0, t15 +; CHECK-NEXT: t9: i32 = sub t2, t4 +; CHECK-NEXT: t21: i32,i32 = AArch64ISD::SUBS t9, Constant:i32<0> +; CHECK-NEXT: t18: ch = AArch64ISD::RET_GLUE t17, Register:i32 $w0, t17:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'do_not_combine_subs_multiple_flag_uses:' +; CHECK-NEXT: SelectionDAG has 19 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t24: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t21:1 +; CHECK-NEXT: t6: i32,ch = CopyFromReg t0, Register:i32 %2 +; CHECK-NEXT: t8: i32,ch = CopyFromReg t0, Register:i32 %3 +; CHECK-NEXT: t23: i32 = AArch64ISD::CSEL t6, t8, Constant:i32<1>, t21:1 +; CHECK-NEXT: t15: i32 = add t24, t23 +; CHECK-NEXT: t17: ch,glue = CopyToReg t0, Register:i32 $w0, t15 +; CHECK-NEXT: t9: i32 = sub t2, t4 +; CHECK-NEXT: t21: i32,i32 = AArch64ISD::SUBS t9, Constant:i32<0> +; CHECK-NEXT: t18: ch = AArch64ISD::RET_GLUE t17, Register:i32 $w0, t17:1 + +define i32 @do_not_combine_subs_multiple_flag_uses(i32 %a, i32 %b, i32 %c, i32 %d) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + %other = select i1 %cc, i32 %c, i32 %d + %add = add i32 %sel, %other + ret i32 %add +} diff --git a/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll b/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll index d1e0729..6a91d85 100644 --- a/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll +++ b/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll @@ -11,10 +11,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @cvtn_f16_tuple(i64 %stride, p ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1] @@ -52,10 +52,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @cvtnt_f32_tuple(i64 %stride, ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 
0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: mov z1.d, z0.d diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir index aed3145..e970d83 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir @@ -9,16 +9,16 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill - ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG + ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-1 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: // implicit-def: $z8 ; CHECK-NEXT: // implicit-def: $p4 ; CHECK-NEXT: addvl sp, sp, #1 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir index 17b1ad2..03a6aab 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir @@ -64,7 +64,7 @@ # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 32 @@ -79,7 +79,8 @@ # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 16 * VG # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 # ASM: .cfi_def_cfa_offset 0 @@ -88,8 +89,8 @@ # # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 
-# UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +32 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit16, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 @@ -129,7 +130,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 48 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # # CHECK-NEXT: $x20 = IMPLICIT_DEF @@ -152,7 +153,8 @@ body: | # ASM-NEXT: .cfi_offset w21, -16 # ASM-NEXT: .cfi_offset w29, -32 # ASM: .cfi_def_cfa_offset 48 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 16 * VG # # ASM: .cfi_def_cfa wsp, 48 # ASM: .cfi_def_cfa_offset 32 @@ -166,9 +168,8 @@ body: | # UNWINDINFO: DW_CFA_offset: reg20 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg21 -16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 -# UNWINDINFO: DW_CFA_def_cfa_offset: +48 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# +# UNWINDINFO: DW_CFA_def_cfa_offset: +48 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit16, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +48 # UNWINDINFO: DW_CFA_def_cfa_offset: +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 @@ -272,7 +273,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 16 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 2 @@ -295,7 +296,8 @@ body: | # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 24 * VG # # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 @@ -305,7 +307,7 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 # UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 @@ -434,7 +436,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup 
CFI_INSTRUCTION escape # CHECK: $[[TMP:x[0-9]+]] = ADDVL_XXI $sp, 1 # CHECK-NEXT: $x0 = LDRXui killed $[[TMP]], 4 @@ -451,7 +453,8 @@ body: | # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 8 * VG # # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 @@ -461,7 +464,7 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 # UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 @@ -504,23 +507,23 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP2:x[0-9]+]] = ADDVL_XXI $sp, 1 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP2]], 255 @@ -529,21 +532,21 @@ body: | # CHECK-NEXT: STR_PXI $p0, killed $[[TMP2]], 255 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION 
escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 9 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 # CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 16 @@ -554,48 +557,65 @@ body: | # ASM-LABEL: test_address_sve_out_of_range: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 512 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2056 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 256 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 512 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 768 * VG +# ASM: .cfi_escape +# 
ASM-SAME: // sp + 16 + 1024 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1280 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1536 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1792 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 2048 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 2056 * VG # -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1808 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1560 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1312 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1064 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 816 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 568 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 320 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 72 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM: .cfi_def_cfa_offset 0 # ASM-NEXT: .cfi_restore w29 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +256, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +512, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +768, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1024, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1280, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1536, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1792, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2048, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2056, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +256, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, 
DW_OP_consts +512, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +768, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1024, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1280, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1536, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1792, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +2048, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +2056, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1808, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1560, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1312, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1064, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +816, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +568, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +320, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +72, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1808, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1560, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1312, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1064, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +816, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +568, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +320, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +72, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 # UNWINDINFO-NEXT: DW_CFA_restore: reg29 @@ -702,15 +722,15 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# 
CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: frame-setup STR_PXI killed $p6, $sp, 5 # CHECK: frame-setup STR_PXI killed $p5, $sp, 6 # CHECK: frame-setup STR_PXI killed $p4, $sp, 7 # CHECK: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $p6 = frame-destroy LDR_PXI $sp, 5 # CHECK: $p5 = frame-destroy LDR_PXI $sp, 6 # CHECK: $p4 = frame-destroy LDR_PXI $sp, 7 @@ -725,20 +745,23 @@ body: | # ASM-LABEL: save_restore_pregs_sve: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 8 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 8 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM: .cfi_def_cfa_offset 0 # ASM-NEXT: .cfi_restore w29 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 # UNWINDINFO-NEXT: DW_CFA_restore: reg29 @@ -761,18 +784,18 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0 # CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1 # CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# 
CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 # CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 @@ -789,13 +812,19 @@ body: | # ASM-LABEL: save_restore_zregs_sve: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 16 +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 24 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM-NEXT: .cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -805,13 +834,13 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e 
+0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -848,7 +877,7 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -32 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -18 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: frame-setup STR_PXI killed $p15, $sp, 4 # CHECK: frame-setup STR_PXI killed $p14, $sp, 5 # CHECK: frame-setup STR_PXI killed $p5, $sp, 14 @@ -857,23 +886,23 @@ body: | # CHECK: frame-setup STR_ZXI killed $z22, $sp, 3 # CHECK: frame-setup STR_ZXI killed $z9, $sp, 16 # CHECK: frame-setup STR_ZXI killed $z8, $sp, 17 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $z23 = frame-destroy LDR_ZXI $sp, 2 # CHECK: $z22 = frame-destroy LDR_ZXI $sp, 3 # CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16 @@ -909,20 +938,33 @@ body: | # ASM-NEXT: .cfi_offset w20, -16 # ASM-NEXT: .cfi_offset w21, -24 # ASM-NEXT: 
.cfi_offset w29, -32 -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG -# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 144 * VG -# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d11 @ cfa - 32 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d12 @ cfa - 40 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d13 @ cfa - 48 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d14 @ cfa - 56 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d15 @ cfa - 64 * VG - 32 +# ASM: .cfi_escape +# ASM-SAME: // sp + 64 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 64 + 152 * VG # -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 152 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 144 * VG # ASM: .cfi_def_cfa wsp, 32 # ASM-NEXT: .cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -943,20 +985,20 @@ body: | # UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16 # UNWINDINFO-NEXT: DW_CFA_offset: reg21 -24 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_consts -32, DW_OP_plus, 
DW_OP_consts -40, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_bregx 0x2e +0, DW_OP_consts -40, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_bregx 0x2e +0, DW_OP_consts -48, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_bregx 0x2e +0, DW_OP_consts -56, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_bregx 0x2e +0, DW_OP_consts -64, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +64, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +64, DW_OP_bregx 0x2e +0, DW_OP_consts +152, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +152, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -1025,14 +1067,14 @@ body: | # CHECK-NEXT: STR_ZXI killed $z22, $sp, 3 # CHECK: STR_ZXI killed $z9, $sp, 16 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 17 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 
0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: $[[TMP]] = frame-setup ADDVL_XXI $[[TMP]], -1 # CHECK-NEXT: $sp = frame-setup ANDXri killed $[[TMP]] @@ -1067,14 +1109,22 @@ body: | # ASM: .cfi_def_cfa w29, 16 # ASM-NEXT: .cfi_offset w30, -8 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d11 @ cfa - 32 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d12 @ cfa - 40 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d13 @ cfa - 48 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d14 @ cfa - 56 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d15 @ cfa - 64 * VG - 16 # # ASM: .cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -1093,14 +1143,14 @@ body: | # UNWINDINFO: DW_CFA_def_cfa: reg29 +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# 
UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -40, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_bregx 0x2e +0, DW_OP_consts -40, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_bregx 0x2e +0, DW_OP_consts -48, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_bregx 0x2e +0, DW_OP_consts -56, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_bregx 0x2e +0, DW_OP_consts -64, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus # # UNWINDINFO: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -1188,17 +1238,17 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: STR_PXI killed $p15, $sp, 6 # CHECK-NEXT: STR_PXI killed $p4, $sp, 7 # CHECK-NEXT: STR_ZXI killed $z23, $sp, 1 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -7 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 7 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 # CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6 @@ -1214,11 +1264,15 @@ body: | # ASM-LABEL: frame_layout: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 80 * VG +# ASM: 
.cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 80 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM-NEXT: .cfi_restore z8 # ASM: .cfi_def_cfa_offset 0 @@ -1226,11 +1280,11 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +80, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +80, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 diff --git a/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll b/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll index 2cf8621..474a9d1 100644 --- a/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll +++ b/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll @@ -36,7 +36,7 @@ define <vscale x 16 x i1> @match_nxv16i8_v4i8(<vscale x 16 x i8> %op1, <4 x i8> ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 ; CHECK-NEXT: umov w8, v1.h[1] @@ -241,7 +241,7 @@ define <vscale x 16 x i1> @match_nxv16i8_v32i8(<vscale x 16 x i8> %op1, <32 x i8 ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z3.b, z1.b[1] @@ -463,7 +463,7 @@ define <vscale x 4 x i1> @match_nxv4xi32_v4i32(<vscale x 4 x i32> %op1, <4 x i32 ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z2.s, z1.s[1] diff --git a/llvm/test/CodeGen/AArch64/lifetime-poison.ll b/llvm/test/CodeGen/AArch64/lifetime-poison.ll index e04530d..dfb76d1 100644 --- a/llvm/test/CodeGen/AArch64/lifetime-poison.ll +++ b/llvm/test/CodeGen/AArch64/lifetime-poison.ll @@ -8,7 +8,7 @@ define void @test() { ; CHECK-LABEL: test: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - call void @llvm.lifetime.start.p0(i64 4, ptr poison) - call void @llvm.lifetime.end.p0(i64 4, ptr poison) + call void @llvm.lifetime.start.p0(ptr poison) + call void @llvm.lifetime.end.p0(ptr poison) ret void } diff --git a/llvm/test/CodeGen/AArch64/luti-with-sme2.ll b/llvm/test/CodeGen/AArch64/luti-with-sme2.ll index 2d30167..59e1cba 100644 --- a/llvm/test/CodeGen/AArch64/luti-with-sme2.ll +++ b/llvm/test/CodeGen/AArch64/luti-with-sme2.ll @@ -9,10 +9,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_luti4_lane_i16_x2_tuple( ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -50,10 +50,10 @@ define { <vscale x 8 x half>, <vscale x 8 x half> } @test_luti4_lane_f16_x2_tupl ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: 
add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -91,10 +91,10 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @test_luti4_lane_bf16_x2 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/midpoint-int.ll b/llvm/test/CodeGen/AArch64/midpoint-int.ll index 15c1dff..79bba53 100644 --- a/llvm/test/CodeGen/AArch64/midpoint-int.ll +++ b/llvm/test/CodeGen/AArch64/midpoint-int.ll @@ -255,12 +255,11 @@ define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_signed_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxth w9, w1 -; CHECK-NEXT: sxth w10, w0 +; CHECK-NEXT: sxth w9, w0 ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, sxth ; CHECK-NEXT: cneg w8, w8, le +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -278,12 +277,11 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind { define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_unsigned_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: and w9, w1, #0xffff -; CHECK-NEXT: and w10, w0, #0xffff +; CHECK-NEXT: and w9, w0, #0xffff ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, uxth ; CHECK-NEXT: cneg w8, w8, ls +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -303,14 +301,13 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind { define i16 @scalar_i16_signed_mem_reg(ptr %a1_addr, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_signed_mem_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxth w9, w1 -; CHECK-NEXT: ldrsh w10, [x0] +; CHECK-NEXT: ldrsh w9, [x0] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w10, w9, w1, sxth ; CHECK-NEXT: cneg w8, w8, le -; CHECK-NEXT: lsr w9, w9, #1 -; CHECK-NEXT: madd w0, w9, w8, w10 +; CHECK-NEXT: cneg w10, w10, mi +; CHECK-NEXT: lsr w10, w10, #1 +; CHECK-NEXT: madd w0, w10, w8, w9 ; CHECK-NEXT: ret %a1 = load i16, ptr %a1_addr %t3 = icmp sgt i16 %a1, %a2 ; signed @@ -382,12 +379,11 @@ define i16 @scalar_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { define i8 @scalar_i8_signed_reg_reg(i8 %a1, 
i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_signed_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxtb w9, w1 -; CHECK-NEXT: sxtb w10, w0 +; CHECK-NEXT: sxtb w9, w0 ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, sxtb ; CHECK-NEXT: cneg w8, w8, le +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -405,12 +401,11 @@ define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind { define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_unsigned_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: and w9, w1, #0xff -; CHECK-NEXT: and w10, w0, #0xff +; CHECK-NEXT: and w9, w0, #0xff ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, uxtb ; CHECK-NEXT: cneg w8, w8, ls +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -430,14 +425,13 @@ define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind { define i8 @scalar_i8_signed_mem_reg(ptr %a1_addr, i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_signed_mem_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxtb w9, w1 -; CHECK-NEXT: ldrsb w10, [x0] +; CHECK-NEXT: ldrsb w9, [x0] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w10, w9, w1, sxtb ; CHECK-NEXT: cneg w8, w8, le -; CHECK-NEXT: lsr w9, w9, #1 -; CHECK-NEXT: madd w0, w9, w8, w10 +; CHECK-NEXT: cneg w10, w10, mi +; CHECK-NEXT: lsr w10, w10, #1 +; CHECK-NEXT: madd w0, w10, w8, w9 ; CHECK-NEXT: ret %a1 = load i8, ptr %a1_addr %t3 = icmp sgt i8 %a1, %a2 ; signed diff --git a/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll b/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll index 7b55c69..1ceb25b 100644 --- a/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll +++ b/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll @@ -13,10 +13,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @tbl2_b_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: ld1b { z3.b, z11.b }, pn8/z, [x1] ; CHECK-NEXT: ld1b { z4.b, z12.b }, pn8/z, [x1, x0] @@ -53,10 +53,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @tbl2_h_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 
0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -94,10 +94,10 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32> } @tbl2_s_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x1] @@ -135,10 +135,10 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64> } @tbl2_d_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1d { z3.d, z11.d }, pn8/z, [x1] @@ -176,10 +176,10 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @tbl2_bf16_tuple(i64 %st ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 
0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -217,10 +217,10 @@ define { <vscale x 4 x float>, <vscale x 4 x float> } @tbl2_f32_tuple(i64 %strid ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x1] @@ -258,10 +258,10 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @tbl2_f64_tuple(i64 %str ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1d { z3.d, z11.d }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll index 0853325..6fcfc5b 100644 --- a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll +++ b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll @@ -328,7 +328,7 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; 
CHECK-NEXT: .cfi_offset w30, -24 ; CHECK-NEXT: .cfi_offset w29, -32 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 32 + 144 * VG ; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill @@ -351,16 +351,16 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d8 @ cfa - 8 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d9 @ cfa - 16 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d10 @ cfa - 24 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d11 @ cfa - 32 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d12 @ cfa - 40 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d13 @ cfa - 48 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d14 @ cfa - 56 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d15 @ cfa - 64 * VG - 32 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x01, 0x1e, 0x22 // sp + 32 + 152 * VG ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -371,7 +371,7 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; CHECK-NEXT: smstart sm ; CHECK-NEXT: .cfi_restore vg ; CHECK-NEXT: addvl sp, sp, #1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 
0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 32 + 144 * VG ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload @@ -448,14 +448,14 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; FP-CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; FP-CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; FP-CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; FP-CHECK-NEXT: addvl sp, sp, #-1 ; FP-CHECK-NEXT: str z0, [x29, #-19, mul vl] // 16-byte Folded Spill ; FP-CHECK-NEXT: //APP diff --git a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll index b0390ec..8398e07 100644 --- a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll +++ b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll @@ -36,7 +36,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 
0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 @@ -129,10 +129,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @bfcvt_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll index b4a83c1..58d2e25 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll @@ -58,7 +58,7 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll index 0bc9e15..3bb516d 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll @@ -24,10 +24,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vector_sat_shift_narrow ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 
8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z2.s, z10.s }, pn8/z, [x1] @@ -98,7 +98,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 diff --git a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir index 1d04cc6..c3338b1 100644 --- a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir +++ b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir @@ -17,7 +17,7 @@ body: | ; CHECK-NEXT: stp d9, d8, [sp, #16] ; CHECK-NEXT: str x29, [sp, #32] ; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 48 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: .cfi_offset b8, -24 ; CHECK-NEXT: .cfi_offset b9, -32 @@ -97,7 +97,7 @@ body: | ; CHECK: str x29, [sp, #-16]! ; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: ptrue pn8.b diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll index 555e38a..109059e 100644 --- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll +++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll @@ -16,7 +16,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> % ; CHECK-LEGALIZATION-NEXT: .cfi_def_cfa_offset 16 ; CHECK-LEGALIZATION-NEXT: .cfi_offset w29, -16 ; CHECK-LEGALIZATION-NEXT: addvl sp, sp, #-3 -; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-LEGALIZATION-NEXT: cntd x8 ; CHECK-LEGALIZATION-NEXT: ptrue p0.d, vl2 ; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2 @@ -59,7 +59,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> % ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: mov w9, #2 // =0x2 @@ -111,7 +111,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x ; 
CHECK-LEGALIZATION-NEXT: .cfi_def_cfa_offset 16 ; CHECK-LEGALIZATION-NEXT: .cfi_offset w29, -16 ; CHECK-LEGALIZATION-NEXT: addvl sp, sp, #-3 -; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-LEGALIZATION-NEXT: cntd x8 ; CHECK-LEGALIZATION-NEXT: ptrue p0.d, vl2 ; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2 @@ -154,7 +154,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: mov w9, #2 // =0x2 diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll index 3a33405..4615b1a 100644 --- a/llvm/test/CodeGen/AArch64/stack-hazard.ll +++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll @@ -388,7 +388,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK0-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill ; CHECK0-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -8 ; CHECK0-NEXT: .cfi_offset b8, -16 ; CHECK0-NEXT: mov z0.s, #0 // =0x0 @@ -407,7 +407,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK64-NEXT: str x29, [sp, #72] // 8-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: addvl sp, sp, #-1 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 144 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -8 ; CHECK64-NEXT: .cfi_offset b8, -80 ; CHECK64-NEXT: mov z0.s, #0 // =0x0 @@ -429,7 +429,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK1024-NEXT: str x29, [sp, #1032] // 8-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: addvl sp, sp, #-1 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2064 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -8 ; CHECK1024-NEXT: .cfi_offset b8, -1040 ; CHECK1024-NEXT: mov z0.s, #0 // =0x0 @@ -955,9 +955,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK0-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: mov w0, wzr @@ -973,9 +973,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 144 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -993,9 +993,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2064 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1017,10 +1017,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK0-NEXT: addvl sp, sp, #-2 ; CHECK0-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: 
//NO_APP ; CHECK0-NEXT: mov w0, wzr @@ -1038,10 +1038,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK64-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 16 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 144 + 16 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 80 - 16 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -1061,10 +1061,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK1024-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1040 - 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1086,9 +1086,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK0-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: addvl x8, sp, #1 @@ -1106,9 +1106,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #80 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 160 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -1127,9 +1127,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1040 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2080 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1153,9 +1153,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK0-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: mov x8, x0 @@ -1174,9 +1174,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #80 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 160 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP @@ -1196,9 +1196,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1040 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2080 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP @@ -1224,9 +1224,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: mov z0.s, #0 // =0x0 ; CHECK0-NEXT: mov w0, wzr ; CHECK0-NEXT: //APP @@ -1246,9 +1246,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: addvl sp, sp, #-1 -; CHECK64-NEXT: 
.cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 16 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 144 + 16 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov z0.s, #0 // =0x0 ; CHECK64-NEXT: add x8, sp, #64 ; CHECK64-NEXT: mov w0, wzr @@ -1271,9 +1271,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: addvl sp, sp, #-1 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov z0.s, #0 // =0x0 ; CHECK1024-NEXT: add x8, sp, #1024 ; CHECK1024-NEXT: mov w0, wzr @@ -1311,7 +1311,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK0-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: sub sp, sp, #16 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 80 + 64 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 80 + 64 * VG ; CHECK0-NEXT: .cfi_offset w19, -8 ; CHECK0-NEXT: .cfi_offset w20, -16 ; CHECK0-NEXT: .cfi_offset w21, -24 @@ -1320,14 +1320,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK0-NEXT: .cfi_offset w24, -48 ; CHECK0-NEXT: .cfi_offset w25, -56 ; CHECK0-NEXT: .cfi_offset w29, -64 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 
0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: mov x8, x0 ; CHECK0-NEXT: mov w0, wzr ; CHECK0-NEXT: //APP @@ -1368,7 +1368,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK64-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #96 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x01, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 224 + 64 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 224 + 64 * VG ; CHECK64-NEXT: .cfi_offset w19, -8 ; CHECK64-NEXT: .cfi_offset w20, -16 ; CHECK64-NEXT: .cfi_offset w21, -24 @@ -1377,14 +1377,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK64-NEXT: .cfi_offset w24, -48 ; CHECK64-NEXT: .cfi_offset w25, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 
0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP @@ -1431,7 +1431,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK1024-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1056 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2144 + 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 2144 + 64 * VG ; CHECK1024-NEXT: .cfi_offset w19, -8 ; CHECK1024-NEXT: .cfi_offset w20, -16 ; CHECK1024-NEXT: .cfi_offset w21, -24 @@ -1440,14 +1440,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK1024-NEXT: .cfi_offset w24, -48 ; CHECK1024-NEXT: .cfi_offset w25, -56 ; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 
0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP @@ -1869,7 +1869,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK0-NEXT: .cfi_offset w30, -40 ; CHECK0-NEXT: .cfi_offset w29, -48 ; CHECK0-NEXT: addvl sp, sp, #-18 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -1898,14 +1898,14 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ 
cfa - 64 * VG - 48 ; CHECK0-NEXT: mov x8, x0 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP @@ -1990,7 +1990,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: .cfi_offset w30, -40 ; CHECK64-NEXT: .cfi_offset w29, -48 ; CHECK64-NEXT: addvl sp, sp, #-18 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2019,16 +2019,16 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 112 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 112 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 112 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 112 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 112 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 112 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 112 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 112 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 112 ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x01, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 176 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 
0x0b, 0x8f, 0xb0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 176 + 144 * VG ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -2051,7 +2051,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: .cfi_restore vg ; CHECK64-NEXT: add sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2119,7 +2119,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: .cfi_offset w30, -40 ; CHECK1024-NEXT: .cfi_offset w29, -48 ; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2148,16 +2148,16 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1072 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1072 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1072 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1072 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1072 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1072 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1072 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1072 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xd0, 
0x77, 0x22 // $d11 @ cfa - 32 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1072 ; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2096 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 2096 + 144 * VG ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -2180,7 +2180,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: movk w0, #59491, lsl #16 ; CHECK1024-NEXT: .cfi_restore vg ; CHECK1024-NEXT: add sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2252,7 +2252,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: .cfi_offset w30, -40 ; CHECK0-NEXT: .cfi_offset w29, -48 ; CHECK0-NEXT: addvl sp, sp, #-18 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2281,16 +2281,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 
0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; CHECK0-NEXT: sub sp, sp, #48 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 96 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 96 + 144 * VG ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: bl __arm_sme_state @@ -2312,7 +2312,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: movk w0, #59491, lsl #16 ; CHECK0-NEXT: .cfi_restore vg ; CHECK0-NEXT: add sp, sp, #48 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK0-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK0-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2376,7 +2376,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: .cfi_offset w30, -40 ; CHECK64-NEXT: .cfi_offset w29, -48 ; CHECK64-NEXT: addvl sp, sp, #-18 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2405,16 +2405,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 112 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 
0x22 // $d9 @ cfa - 112 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 112 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 112 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 112 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 112 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 112 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 112 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 112 ; CHECK64-NEXT: sub sp, sp, #112 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x01, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 224 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 224 + 144 * VG ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP ; CHECK64-NEXT: bl __arm_sme_state @@ -2436,7 +2436,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: .cfi_restore vg ; CHECK64-NEXT: add sp, sp, #112 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2504,7 +2504,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: .cfi_offset w30, -40 ; CHECK1024-NEXT: .cfi_offset w29, -48 ; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 
0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2533,16 +2533,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1072 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1072 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1072 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1072 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1072 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1072 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1072 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1072 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1072 ; CHECK1024-NEXT: sub sp, sp, #1072 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2144 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 2144 + 144 * VG ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP ; CHECK1024-NEXT: bl __arm_sme_state @@ -2564,7 +2564,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: movk w0, #59491, lsl #16 ; CHECK1024-NEXT: .cfi_restore vg ; CHECK1024-NEXT: add sp, sp, #1072 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 
0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -3192,14 +3192,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: mov w9, w0 ; CHECK0-NEXT: mov x8, sp ; CHECK0-NEXT: mov w2, w1 @@ -3327,14 +3327,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 
0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: mov w9, w0 ; CHECK64-NEXT: mov x8, sp @@ -3469,14 +3469,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 
0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: mov w9, w0 ; CHECK1024-NEXT: mov x8, sp @@ -3616,14 +3616,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 
0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: sub x9, sp, #1024 ; CHECK0-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK0-NEXT: mov w2, w1 @@ -3743,14 +3743,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: sub x9, sp, #1088 ; CHECK64-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK64-NEXT: mov w2, w1 @@ -3875,14 +3875,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 
0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: sub x9, sp, #2048 ; CHECK1024-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK1024-NEXT: mov w2, w1 @@ -4016,14 +4016,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK0-NEXT: .cfi_offset w28, -48 ; CHECK0-NEXT: .cfi_offset w30, -56 ; CHECK0-NEXT: .cfi_offset w29, -64 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 
0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK0-NEXT: ubfiz x8, x0, #2, #32 ; CHECK0-NEXT: mov x9, sp @@ -4125,14 +4125,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK64-NEXT: .cfi_offset w28, -48 ; CHECK64-NEXT: .cfi_offset w30, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK64-NEXT: ubfiz x8, x0, #2, #32 ; 
CHECK64-NEXT: mov x9, sp @@ -4240,14 +4240,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK1024-NEXT: .cfi_offset w28, -48 ; CHECK1024-NEXT: .cfi_offset w30, -56 ; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK1024-NEXT: ubfiz x8, x0, #2, #32 ; CHECK1024-NEXT: mov x9, sp diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll index 56d865e..59b95be 100644 --- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll +++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll @@ -18,7 +18,7 @@ define void @sve_1_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -38,7 +38,7 @@ define void @sve_4_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; 
CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -63,7 +63,7 @@ define void @sve_16_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-16 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 128 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x01, 0x1e, 0x22 // sp + 16 + 128 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 @@ -103,7 +103,7 @@ define void @sve_17_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-17 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 136 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x01, 0x1e, 0x22 // $x9 + 16 + 136 * VG ; CHECK-NEXT: .LBB3_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -155,9 +155,9 @@ define void @sve_1v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload @@ -180,15 +180,15 @@ define void @sve_4v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: str z11, [sp] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG +; CHECK-NEXT: .cfi_escape 0x10, 
0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z11, [sp] // 16-byte Folded Reload @@ -217,7 +217,7 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-16 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 128 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x01, 0x1e, 0x22 // sp + 16 + 128 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill ; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill @@ -235,14 +235,14 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr 
z23, [sp] // 16-byte Folded Reload @@ -287,7 +287,7 @@ define void @sve_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -310,7 +310,7 @@ define void @sve_4p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p11, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p10, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p9, [sp, #6, mul vl] // 2-byte Folded Spill @@ -339,7 +339,7 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-17 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 136 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x01, 0x1e, 0x22 // $x9 + 16 + 136 * VG ; CHECK-NEXT: .LBB9_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -370,14 +370,14 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa 
- 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload @@ -426,7 +426,7 @@ define void @sve_1_vector_16_arr(ptr %out) #0 { ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .cfi_def_cfa wsp, 32 ; CHECK-NEXT: add sp, sp, #16 @@ -453,9 +453,9 @@ define void @sve_1_vector_4096_arr(ptr %out) #0 { ; CHECK-NEXT: sub x9, sp, #3, lsl #12 // =12288 ; CHECK-NEXT: .cfi_def_cfa w9, 12304 ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x79, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 12304 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x79, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // $x9 + 12304 + 256 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x79, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 12304 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x79, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // $x9 + 12304 + 512 * VG ; CHECK-NEXT: .LBB11_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -470,9 +470,9 @@ define void @sve_1_vector_4096_arr(ptr %out) #0 { ; CHECK-NEXT: ldr xzr, [sp] ; CHECK-NEXT: .cfi_def_cfa_register wsp ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x8f, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x88, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 12304 + 264 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x02, 0x1e, 0x22 // sp + 12304 + 264 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 12304 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 12304 + 16 * VG ; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: .cfi_def_cfa wsp, 12304 ; CHECK-NEXT: add sp, sp, #3, lsl #12 // =12288 @@ -538,38 +538,38 @@ define void @sve_1024_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // sp + 16 + 256 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 
16 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // sp + 16 + 512 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x06, 0x1e, 0x22 // sp + 16 + 768 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x08, 0x1e, 0x22 // sp + 16 + 1024 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0a, 0x1e, 0x22 // sp + 16 + 1280 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0c, 0x1e, 0x22 // sp + 16 + 1536 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0e, 0x1e, 0x22 // sp + 16 + 1792 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x10, 0x1e, 0x22 // sp + 16 + 2048 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1800 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x0e, 0x1e, 0x22 // sp + 16 + 1800 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1552 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x0c, 0x1e, 0x22 // sp + 16 + 1552 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1304 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x0a, 0x1e, 0x22 // sp + 16 + 1304 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1056 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x08, 0x1e, 0x22 // sp + 16 + 1056 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 808 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x06, 0x1e, 0x22 // sp + 16 + 808 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 560 * VG +; 
CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x04, 0x1e, 0x22 // sp + 16 + 560 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 312 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x02, 0x1e, 0x22 // sp + 16 + 312 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: addvl sp, sp, #8 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -588,23 +588,23 @@ define void @sve_1028_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // $x9 + 16 + 256 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // $x9 + 16 + 512 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 768 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x06, 0x1e, 0x22 // $x9 + 16 + 768 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1024 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x08, 0x1e, 0x22 // $x9 + 16 + 1024 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1280 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0a, 0x1e, 0x22 // $x9 + 16 + 1280 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1536 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0c, 0x1e, 0x22 // $x9 + 16 + 1536 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1792 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0e, 0x1e, 0x22 // $x9 + 16 + 1792 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 2048 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x10, 0x1e, 0x22 // $x9 + 16 + 2048 * VG ; CHECK-NEXT: addvl x9, x9, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 2056 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 
0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x10, 0x1e, 0x22 // $x9 + 16 + 2056 * VG ; CHECK-NEXT: .LBB14_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #16, lsl #12 // =65536 @@ -619,21 +619,21 @@ define void @sve_1028_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: ldr xzr, [sp] ; CHECK-NEXT: .cfi_def_cfa_register wsp ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x0e, 0x1e, 0x22 // sp + 16 + 1808 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x0c, 0x1e, 0x22 // sp + 16 + 1560 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x0a, 0x1e, 0x22 // sp + 16 + 1312 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x08, 0x1e, 0x22 // sp + 16 + 1064 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x06, 0x1e, 0x22 // sp + 16 + 816 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x04, 0x1e, 0x22 // sp + 16 + 568 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x02, 0x1e, 0x22 // sp + 16 + 320 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: addvl sp, sp, #9 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -656,7 +656,7 @@ define void @sve_5_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-5 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 40 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x28, 0x1e, 0x22 // sp + 16 + 40 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #5 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 @@ -682,21 +682,21 @@ define void @sve_unprobed_area(<vscale x 4 x float> %a, i32 %n) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 
0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: str p9, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: addvl sp, sp, #4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll index 0711f69..df83762 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll +++ b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll @@ -5,8 +5,8 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64--linux-android" declare void @use(ptr) -declare void @llvm.lifetime.start.p0(i64, ptr nocapture) -declare void @llvm.lifetime.end.p0(i64, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) define void @OneVarNoInit() sanitize_memtag { @@ -16,18 +16,18 @@ define void @OneVarNoInit() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[TX]], i64 16) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) +; 
CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -39,19 +39,19 @@ define void @OneVarInitConst() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 42, i64 0) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 42, ptr %x, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -64,21 +64,21 @@ define void @ArrayInitConst() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 42, i64 0) ; CHECK-NEXT: [[TX8_16:%.*]] = getelementptr i8, ptr [[TX]], i32 16 ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_16]], i64 48) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 16, align 4 - call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 42, ptr %x, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -90,7 +90,7 @@ define void @ArrayInitConst2() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 2 ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 184683593770, i64 -1) @@ -98,19 +98,19 @@ define void @ArrayInitConst2() sanitize_memtag { ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_16]], i64 48) ; CHECK-NEXT: call void @use(ptr 
nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 16, align 4 - call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 42, ptr %x, align 4 %0 = getelementptr i32, ptr %x, i32 1 store i32 43, ptr %0, align 4 %1 = getelementptr i32, ptr %x, i32 2 store i64 -1, ptr %1, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -122,23 +122,23 @@ define void @ArrayInitConstSplit() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1 ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 -4294967296, i64 4294967295) ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TX]], i32 16 ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TMP1]], i64 48) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 16, align 4 - call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) %0 = getelementptr i32, ptr %x, i32 1 store i64 -1, ptr %0, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -150,7 +150,7 @@ define void @ArrayInitConstWithHoles() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 32, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 5 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 14 ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX]], i64 16) @@ -164,18 +164,18 @@ define void @ArrayInitConstWithHoles() sanitize_memtag { ; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_64]], i64 64) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 128) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 32, align 4 - call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) %0 = getelementptr i32, ptr %x, i32 5 store i32 42, ptr %0, align 4 %1 = getelementptr i32, ptr %x, i32 14 store i32 43, ptr %1, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %x) + call 
void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -187,20 +187,20 @@ define void @InitNonConst(i32 %v) sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16 ; CHECK-NEXT: [[X_TAG:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[V]] to i64 ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[X_TAG]], i64 [[TMP0]], i64 0) ; CHECK-NEXT: call void @use(ptr nonnull [[X_TAG]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 %v, ptr %x, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -212,7 +212,7 @@ define void @InitNonConst2(i32 %v, i32 %w) sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[V]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 1 ; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[W]] to i64 @@ -221,17 +221,17 @@ define void @InitNonConst2(i32 %v, i32 %w) sanitize_memtag { ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 [[VW]], i64 0) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 4, align 4 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store i32 %v, ptr %x, align 4 %0 = getelementptr i32, ptr %x, i32 1 store i32 %w, ptr %0, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -243,19 +243,19 @@ define void @InitVector() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), i64 0) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 4, align 4 - call void 
@llvm.lifetime.start.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) store <2 x i32> <i32 1, i32 2>, ptr %x, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } @@ -302,23 +302,23 @@ define void @InitVectorSplit() sanitize_memtag { ; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0) ; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16 ; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), 32 ; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), 32 ; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 [[TMP1]], i64 [[LSHR]]) ; CHECK-NEXT: call void @use(ptr nonnull [[TX]]) ; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]]) ; CHECK-NEXT: ret void ; entry: %x = alloca i32, i32 4, align 4 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.start.p0(ptr nonnull %x) %0 = getelementptr i32, ptr %x, i32 1 store <2 x i32> <i32 1, i32 2>, ptr %0, align 4 call void @use(ptr nonnull %x) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x) + call void @llvm.lifetime.end.p0(ptr nonnull %x) ret void } diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir b/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir index 45f6bfe..0fa5103 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir +++ b/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir @@ -18,15 +18,15 @@ %C.tag = call ptr @llvm.aarch64.tagp.p0(ptr %C, ptr %basetag, i64 1) call void @llvm.aarch64.settag(ptr %C.tag, i64 32) call void @F56(ptr %C.tag) - call void @llvm.lifetime.start.p0(i64 32, ptr %A) + call void @llvm.lifetime.start.p0(ptr %A) call void @llvm.aarch64.settag(ptr %A.tag, i64 32) call void @F56(ptr %A.tag) call void @llvm.aarch64.settag(ptr %A, i64 32) - call void @llvm.lifetime.end.p0(i64 32, ptr %A) - call void @llvm.lifetime.start.p0(i64 32, ptr %A) + call void @llvm.lifetime.end.p0(ptr %A) + call void @llvm.lifetime.start.p0(ptr %A) call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 @glob, i64 32, i1 false) call void @F78(ptr %A) - call void @llvm.lifetime.end.p0(i64 32, ptr %A) + call void @llvm.lifetime.end.p0(ptr %A) call void @llvm.aarch64.settag(ptr %C, i64 32) ret void } diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll index aa9cccc..91adf82 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll +++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll @@ -17,17 +17,17 @@ S0: S1: ; CHECK-LABEL: S1: - call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %v) #1 + call void @llvm.lifetime.start.p0(ptr nonnull %v) #1 ; CHECK: call void @llvm.aarch64.settag(ptr %v.tag, i64 48) - call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %w) #1 + call void @llvm.lifetime.start.p0(ptr nonnull %w) #1 ; CHECK: call void @llvm.aarch64.settag(ptr %w.tag, i64 48) %t1 = 
call i32 @g1(ptr nonnull %v, ptr nonnull %w) #1 ; CHECK: call i32 @g1 ; CHECK-NOT: settag{{.*}}%v ; CHECK: call void @llvm.aarch64.settag(ptr %w, i64 48) ; CHECK-NOT: settag{{.*}}%v - call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) #1 -; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) + call void @llvm.lifetime.end.p0(ptr nonnull %w) #1 +; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %w) %b1 = icmp eq i32 %t1, 0 br i1 %b1, label %S2, label %S3 ; CHECK-NOT: settag @@ -40,7 +40,7 @@ S2: S3: ; CHECK-LABEL: S3: - call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %v) #1 + call void @llvm.lifetime.end.p0(ptr nonnull %v) #1 tail call void @z1() #1 br label %exit2 ; CHECK-NOT: settag @@ -73,9 +73,9 @@ declare void @z1() #0 declare void @z2() #0 -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll index 2520095..8b7fa9e 100644 --- a/llvm/test/CodeGen/AArch64/sve-alloca.ll +++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll @@ -46,14 +46,14 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) { ; CHECK-NEXT: .cfi_offset w28, -16 ; CHECK-NEXT: .cfi_offset w30, -24 ; CHECK-NEXT: .cfi_offset w29, -32 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d8 @ cfa - 8 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d9 @ cfa - 16 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d10 @ cfa - 24 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d11 @ cfa - 32 * VG - 32 +; 
CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d12 @ cfa - 40 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d13 @ cfa - 48 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d14 @ cfa - 56 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d15 @ cfa - 64 * VG - 32 ; CHECK-NEXT: rdvl x9, #2 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: add x9, x9, #15 diff --git a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll index 30a8396..254b8e0 100644 --- a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll +++ b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll @@ -43,17 +43,17 @@ define void @fbyte(<vscale x 16 x i8> %v){ ; NOPAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; NOPAIR-NEXT: .cfi_offset w30, -8 ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c 
// $d15 @ cfa - 64 * VG - 16 ; NOPAIR-NEXT: bl my_func ; NOPAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; NOPAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload @@ -113,17 +113,17 @@ define void @fbyte(<vscale x 16 x i8> %v){ ; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; PAIR-NEXT: .cfi_offset w30, -8 ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; PAIR-NEXT: bl my_func ; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload @@ -187,17 +187,17 @@ define void @fhalf(<vscale x 8 x half> %v) { ; NOPAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; NOPAIR-NEXT: .cfi_offset w30, -8 ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: 
.cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; NOPAIR-NEXT: bl my_func ; NOPAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; NOPAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload @@ -257,17 +257,17 @@ define void @fhalf(<vscale x 8 x half> %v) { ; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; PAIR-NEXT: .cfi_offset w30, -8 ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 
0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; PAIR-NEXT: bl my_func ; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload @@ -310,11 +310,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() { ; NOPAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload @@ -336,11 +336,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() { ; PAIR-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 
@ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ptrue pn8.b @@ -368,11 +368,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() { ; NOPAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload @@ -393,11 +393,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() { ; PAIR-NEXT: str p10, [sp, #6, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: st1b { z8.b, z9.b }, pn9, [sp, #2, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; 
PAIR-NEXT: ptrue pn9.b @@ -421,10 +421,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() { ; NOPAIR-NEXT: addvl sp, sp, #-2 ; NOPAIR-NEXT: str z9, [sp] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z9, [sp] // 16-byte Folded Reload @@ -440,10 +440,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() { ; PAIR-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload @@ -494,10 +494,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_2_z_regs_negative() { ; NOPAIR-NEXT: addvl sp, sp, #-2 ; NOPAIR-NEXT: str z10, [sp] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 16 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp] // 16-byte Folded Reload @@ -512,10 +512,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_2_z_regs_negative() { ; PAIR-NEXT: addvl sp, 
sp, #-2 ; PAIR-NEXT: str z10, [sp] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 16 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 16 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ldr z10, [sp] // 16-byte Folded Reload @@ -536,7 +536,7 @@ define aarch64_sve_vector_pcs void @test_clobbers_p_reg_negative() { ; NOPAIR-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; NOPAIR-NEXT: addvl sp, sp, #-1 ; NOPAIR-NEXT: str p10, [sp, #7, mul vl] // 2-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP @@ -550,7 +550,7 @@ define aarch64_sve_vector_pcs void @test_clobbers_p_reg_negative() { ; PAIR-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; PAIR-NEXT: addvl sp, sp, #-1 ; PAIR-NEXT: str p10, [sp, #7, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; PAIR-NEXT: .cfi_offset w29, -16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll index 5e4c891..9066051 100644 --- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll +++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll @@ -438,7 +438,7 @@ define void @non_sve_caller_non_sve_callee_high_range() { ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 @@ -464,7 +464,7 @@ define void @non_sve_caller_high_range_non_sve_callee_high_range(float %f0, floa ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 @@ -523,17 +523,17 @@ define <vscale x 4 x float> @sve_caller_non_sve_callee_high_range(<vscale x 4 x ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 168 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x01, 0x1e, 0x22 // sp + 16 + 168 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: mov z25.d, z0.d ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: movi d0, #0000000000000000 @@ -621,17 +621,17 @@ define <vscale x 4 x float> @sve_ret_caller_non_sve_callee_high_range() { ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 
0x1e, 0x22 // sp + 16 + 160 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: movi d0, #0000000000000000 ; CHECK-NEXT: fmov s1, #1.00000000 ; CHECK-NEXT: addvl x0, sp, #1 @@ -686,7 +686,7 @@ define void @verify_all_operands_are_initialised() { ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll index d02aa06..6c6a691 100644 --- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll @@ -8,7 +8,7 @@ define <4 x i32> @extract_v4i32_nxv16i32_12(<vscale x 16 x i32> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -27,7 +27,7 @@ define <8 x i16> @extract_v8i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z1, [sp, #1, mul vl] ; CHECK-NEXT: str z0, [sp] @@ -44,7 +44,7 @@ define <4 x i16> @extract_v4i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -65,7 +65,7 @@ define <2 x i16> @extract_v2i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: str z3, [sp, #3, mul vl] @@ -94,7 +94,7 @@ define <2 x i64> @extract_v2i64_nxv8i64_8(<vscale x 8 x i64> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, #8 // =0x8 @@ -120,7 +120,7 @@ define <4 x float> @extract_v4f32_nxv16f32_12(<vscale x 16 x float> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -168,7 +168,7 @@ define <4 x i1> @extract_v4i1_nxv32i1_16(<vscale x 32 x i1> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1 ; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1 @@ -224,7 +224,7 @@ define <4 x i3> @extract_v4i3_nxv32i3_16(<vscale x 32 x i3> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: str z1, [sp, #1, mul vl] @@ -271,7 +271,7 @@ define <4 x i64> @extract_v4i64_nxv8i64_0(<vscale x 8 x i64> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z1, [sp, #1, mul vl] ; CHECK-NEXT: str z0, [sp] diff --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll index cbede1b..4aaa25e 100644 --- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll @@ -63,7 +63,7 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) uw ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: punpkhi p2.h, p1.b ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: punpklo p1.h, p1.b diff --git a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll index 4b93900..8750867 100644 --- a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll +++ b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll @@ -49,7 +49,7 @@ define half @fadda_nxv6f16(<vscale x 6 x half> %v, half %s) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov w8, #32768 // =0x8000 ; CHECK-NEXT: ptrue p0.d @@ -73,7 +73,7 @@ define half @fadda_nxv10f16(<vscale x 10 x half> %v, half %s) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: // kill: def $h2 killed $h2 def $z2 diff --git a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll index 1b6b92a..4374409 100644 --- a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll +++ b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll @@ -254,7 +254,7 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-4476578029606273024 // =0xc1e0000000000000 ; CHECK-NEXT: ptrue p0.d @@ -341,7 +341,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-4548635623644200960 // =0xc0e0000000000000 ; CHECK-NEXT: ptrue p0.d diff --git a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll index b3aefb8..1df2819 100644 --- a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll +++ b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll @@ -208,7 +208,7 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) { ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #281474974613504 // =0xffffffe00000 @@ -275,7 +275,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) { ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #281337537757184 // =0xffe000000000 diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll index 7f558e3..8ca005a 100644 --- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll @@ -588,7 +588,7 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val, ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1 ; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1 diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll index dcf3317..73c783d 100644 --- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll @@ -186,7 +186,7 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, ptr %out) uwt ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: str z0, [sp] ; CHECK-NEXT: str q1, [sp, #32] @@ -229,7 +229,7 @@ define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) uwtable { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [sp, #16] ; CHECK-NEXT: ldr z0, [sp, #1, mul vl] @@ -896,7 +896,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_0(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -923,7 +923,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_1(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -950,7 +950,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_2(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -977,7 +977,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_3(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1004,7 +1004,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_4(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1031,7 +1031,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_5(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1058,7 +1058,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_6(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1085,7 +1085,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_7(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1112,7 +1112,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_8(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1139,7 +1139,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_9(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1166,7 +1166,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_10(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1193,7 +1193,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_11(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1220,7 +1220,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_12(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1247,7 +1247,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_13(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1274,7 +1274,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_14(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1301,7 +1301,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_15(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b diff --git a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir index 6d09425..2a7e8a43c 100644 --- a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir +++ b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir @@ -41,13 +41,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) @@ -64,7 +64,7 @@ body: | ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s64) from %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy 
ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -100,13 +100,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) @@ -123,7 +123,7 @@ body: | ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s64) from %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -159,44 +159,44 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; 
CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -231,44 +231,44 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, 
-16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_IMM renamable $p0, killed $x8, -8, implicit 
$ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 diff --git a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir index 1352b9d..863d4d1 100644 --- a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir +++ b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir @@ -41,13 +41,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, 7 :: (load (s16) from %ir.object) ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, 7 :: (load (s32) from %ir.object, align 8) @@ -56,7 +56,7 @@ body: | ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s16) into %ir.object, align 8) ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s32) into %ir.object, align 8) ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -84,13 +84,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 
0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, -8 :: (load (s16) from %ir.object) ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, -8 :: (load (s32) from %ir.object) @@ -99,7 +99,7 @@ body: | ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s16) into %ir.object, align 8) ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s32) into %ir.object, align 8) ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -127,30 +127,30 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, 7 :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, killed $x8, 7 :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, killed $x8, 7 :: (load (s32) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1D_ZRI renamable $p0, killed $x8, 7 :: (load (s64) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1B_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s8) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s16) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s32) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; 
CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -178,30 +178,30 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, -8 :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, killed $x8, -8 :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, killed $x8, -8 :: (load (s32) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1D_ZRI renamable $p0, killed $x8, -8 :: (load (s64) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1B_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s8) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s16) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s32) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 diff --git a/llvm/test/CodeGen/AArch64/sve-llrint.ll b/llvm/test/CodeGen/AArch64/sve-llrint.ll index b0198cf..12d49183 100644 --- a/llvm/test/CodeGen/AArch64/sve-llrint.ll +++ b/llvm/test/CodeGen/AArch64/sve-llrint.ll @@ -88,7 +88,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 
0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z1.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h @@ -161,11 +161,11 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z2.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w8, #64511 // =0xfbff @@ -299,16 +299,16 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 
0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z4.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w9, #64511 // =0xfbff @@ -614,7 +614,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z2.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s @@ -684,11 +684,11 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z4.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: mov w8, #-553648128 // =0xdf000000 @@ -818,16 +818,16 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: 
.cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z24.d, z0.s ; CHECK-NEXT: uunpkhi z25.d, z0.s ; CHECK-NEXT: mov w9, #-553648128 // =0xdf000000 @@ -1125,7 +1125,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 @@ -1190,10 +1190,10 @@ define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) { ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 
0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 ; CHECK-NEXT: mov z26.d, #0x8000000000000000 @@ -1312,16 +1312,16 @@ define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ldr z2, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/sve-lrint.ll b/llvm/test/CodeGen/AArch64/sve-lrint.ll index aa586390..58ac53d 100644 --- a/llvm/test/CodeGen/AArch64/sve-lrint.ll +++ b/llvm/test/CodeGen/AArch64/sve-lrint.ll @@ -89,7 +89,7 @@ define <vscale x 8 x iXLen> @lrint_v8f16(<vscale x 8 x half> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; 
CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z1.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h @@ -162,11 +162,11 @@ define <vscale x 16 x iXLen> @lrint_v16f16(<vscale x 16 x half> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z2.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w8, #64511 // =0xfbff @@ -300,16 +300,16 @@ define <vscale x 32 x iXLen> @lrint_v32f16(<vscale x 32 x half> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 
0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z4.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w9, #64511 // =0xfbff @@ -615,7 +615,7 @@ define <vscale x 8 x iXLen> @lrint_v8f32(<vscale x 8 x float> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z2.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s @@ -685,11 +685,11 @@ define <vscale x 16 x iXLen> @lrint_v16f32(<vscale x 16 x float> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z4.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: mov w8, #-553648128 // =0xdf000000 @@ -819,16 +819,16 @@ define <vscale x 32 x iXLen> @lrint_v32f32(<vscale x 32 x float> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, 
-16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z24.d, z0.s ; CHECK-NEXT: uunpkhi z25.d, z0.s ; CHECK-NEXT: mov w9, #-553648128 // =0xdf000000 @@ -1126,7 +1126,7 @@ define <vscale x 8 x iXLen> @lrint_v8f64(<vscale x 8 x double> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 @@ -1191,10 +1191,10 @@ define <vscale x 16 x iXLen> @lrint_v16f64(<vscale x 16 x double> %x) { ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 
0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 ; CHECK-NEXT: mov z26.d, #0x8000000000000000 @@ -1313,16 +1313,16 @@ define <vscale x 32 x iXLen> @lrint_v32f64(<vscale x 32 x double> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ldr z2, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll index 6e08606..24df76b 100644 --- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll @@ -53,7 +53,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1> ; CHECK-NEXT: 
.cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill @@ -137,7 +137,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1> ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill diff --git a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll index 9a4231a..0bc8cb8 100644 --- a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll @@ -20,7 +20,7 @@ define i8 @split_extract_32i8_idx(<vscale x 32 x i8> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov w9, w0 @@ -43,7 +43,7 @@ define i16 @split_extract_16i16_idx(<vscale x 16 x i16> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, w0 @@ -66,7 +66,7 @@ define i32 @split_extract_8i32_idx(<vscale x 8 x i32> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, w0 @@ -89,7 +89,7 @@ define i64 @split_extract_8i64_idx(<vscale x 8 x i64> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, w0 @@ -134,7 +134,7 @@ define i16 @split_extract_16i16(<vscale x 16 x i16> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, #128 // =0x80 @@ -157,7 +157,7 @@ define i32 @split_extract_16i32(<vscale x 16 x i32> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, #34464 // =0x86a0 @@ -183,7 +183,7 @@ define i64 @split_extract_4i64(<vscale x 4 x i64> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cntw x8 ; CHECK-NEXT: mov w9, #10 // =0xa diff --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll index d7ed42d..4ed59bc 100644 --- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll @@ -21,7 +21,7 @@ define <vscale x 32 x i8> @split_insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt, ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov x9, sp @@ -45,7 +45,7 @@ define <vscale x 8 x float> @split_insert_8f32_idx(<vscale x 8 x float> %a, floa ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov x9, sp @@ -69,7 +69,7 @@ define <vscale x 8 x i64> @split_insert_8i64_idx(<vscale x 8 x i64> %a, i64 %elt ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov x9, sp @@ -130,7 +130,7 @@ define <vscale x 32 x i16> @split_insert_32i16(<vscale x 32 x i16> %a, i16 %elt) ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov w9, #128 // =0x80 @@ -159,7 +159,7 @@ define <vscale x 8 x i32> @split_insert_8i32(<vscale x 8 x i32> %a, i32 %elt) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, #16960 // =0x4240 diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll index c5cf459..e0da9b57 100644 --- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll +++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll @@ -16,7 +16,7 @@ define i32 @csr_d8_allocnxv4i32i32f64(double %d) "aarch64_pstate_sm_compatible" ; CHECK-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -8 ; CHECK-NEXT: .cfi_offset b8, -16 ; CHECK-NEXT: mov z1.s, #0 // =0x0 @@ -219,7 +219,7 @@ define i32 @csr_d8_allocnxv4i32i32f64_stackargsi32f64(double %d0, double %d1, do ; CHECK-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -8 ; CHECK-NEXT: .cfi_offset b8, -16 ; CHECK-NEXT: mov z1.s, #0 // =0x0 @@ -266,7 +266,7 @@ define i32 @svecc_z8_allocnxv4i32i32f64_fp(double %d, <vscale x 4 x i32> %v) "aa ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -310,7 +310,7 @@ define i32 @svecc_z8_allocnxv4i32i32f64_stackargsi32_fp(double %d, i32 %i0, i32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; 
CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -383,7 +383,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK-NEXT: .cfi_offset w30, -40 ; CHECK-NEXT: .cfi_offset w29, -48 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -412,14 +412,14 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; CHECK-NEXT: mov x8, x0 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll 
b/llvm/test/CodeGen/AArch64/sve-trunc.ll index 0ec6538..50580cb 100644 --- a/llvm/test/CodeGen/AArch64/sve-trunc.ll +++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll @@ -115,7 +115,7 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: and z7.d, z7.d, #0x1 ; CHECK-NEXT: and z6.d, z6.d, #0x1 diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll index 8a504cd..198e0a3 100644 --- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll +++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll @@ -105,7 +105,7 @@ define <vscale x 8 x i32> @test_compress_large(<vscale x 8 x i32> %vec, <vscale ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: cnth x9 diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll index 0eacac2..1dbd7dd 100644 --- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll +++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll @@ -276,7 +276,7 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken(target("aarch64.svcount") %pn, ptr ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0] @@ -298,7 +298,7 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken_scalar(target("aarch64.svcount") % ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0, x1] @@ -585,7 +585,7 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken(target("aarch64.svcount") %pn, pt ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0] @@ -607,7 +607,7 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken_scalar(target("aarch64.svcount") ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0, x1, lsl #1] @@ -896,7 +896,7 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken(target("aarch64.svcount") %pn, ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0] @@ -918,7 +918,7 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken_scalar(target("aarch64.svcount" ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0, x1, lsl #2] @@ -1205,7 +1205,7 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken(target("aarch64.svcount") %pn, ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0] @@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken_scalar(target("aarch64.svcount" ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0, x1, lsl #3] diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll index 822be14..7e1f63d 100644 --- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll +++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll @@ -13,7 +13,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -42,27 +42,27 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 
0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; CHECK-NEXT: .cfi_remember_state ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .Ltmp0: +; CHECK-NEXT: .Ltmp0: // EH_LABEL ; CHECK-NEXT: bl may_throw_sve -; CHECK-NEXT: .Ltmp1: +; CHECK-NEXT: .Ltmp1: // EH_LABEL ; CHECK-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: b .LBB0_1 ; CHECK-NEXT: .LBB0_1: // %.Lcontinue ; CHECK-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -108,10 +108,10 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB0_2: // %.Lunwind ; CHECK-NEXT: .cfi_restore_state -; CHECK-NEXT: .Ltmp2: +; CHECK-NEXT: .Ltmp2: // EH_LABEL ; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -165,7 +165,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: .cfi_offset w30, -8 ; GISEL-NEXT: .cfi_offset w29, -16 ; GISEL-NEXT: addvl sp, sp, #-18 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; GISEL-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; GISEL-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -194,27 +194,27 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 
0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; GISEL-NEXT: addvl sp, sp, #-2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; GISEL-NEXT: .cfi_remember_state ; GISEL-NEXT: str z0, [sp] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp0: +; GISEL-NEXT: .Ltmp0: // EH_LABEL ; GISEL-NEXT: bl may_throw_sve -; GISEL-NEXT: .Ltmp1: +; GISEL-NEXT: .Ltmp1: // EH_LABEL ; GISEL-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: b .LBB0_1 ; GISEL-NEXT: .LBB0_1: // %.Lcontinue ; GISEL-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -260,10 +260,10 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: ret ; GISEL-NEXT: .LBB0_2: // %.Lunwind ; GISEL-NEXT: .cfi_restore_state -; GISEL-NEXT: .Ltmp2: +; GISEL-NEXT: .Ltmp2: // EH_LABEL ; GISEL-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; 
GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -355,9 +355,9 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; CHECK-NEXT: .cfi_offset b23, -272 ; CHECK-NEXT: .cfi_remember_state ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .Ltmp3: +; CHECK-NEXT: .Ltmp3: // EH_LABEL ; CHECK-NEXT: bl may_throw_neon -; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: .Ltmp4: // EH_LABEL ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: b .LBB1_1 ; CHECK-NEXT: .LBB1_1: // %.Lcontinue @@ -394,7 +394,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: // %.Lunwind ; CHECK-NEXT: .cfi_restore_state -; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: .Ltmp5: // EH_LABEL ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload ; CHECK-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload @@ -462,10 +462,10 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; GISEL-NEXT: .cfi_offset b23, -272 ; GISEL-NEXT: .cfi_remember_state ; GISEL-NEXT: str q0, [sp] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp3: +; GISEL-NEXT: .Ltmp3: // EH_LABEL ; GISEL-NEXT: bl may_throw_neon ; GISEL-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp4: +; GISEL-NEXT: .Ltmp4: // EH_LABEL ; GISEL-NEXT: b .LBB1_1 ; GISEL-NEXT: .LBB1_1: // %.Lcontinue ; GISEL-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload @@ -501,7 +501,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; GISEL-NEXT: ret ; GISEL-NEXT: .LBB1_2: // %.Lunwind ; GISEL-NEXT: .cfi_restore_state -; GISEL-NEXT: .Ltmp5: +; GISEL-NEXT: .Ltmp5: // EH_LABEL ; GISEL-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; GISEL-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload ; GISEL-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll index a066b15..e6a8bac 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll @@ -1917,8 +1917,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX9-NEXT: s_mov_b32 s0, 0 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; GFX9-NEXT: v_mov_b32_e32 v0, 15 -; GFX9-NEXT: s_movk_i32 s0, 0x3e84 +; GFX9-NEXT: s_add_i32 s0, s0, 4 ; GFX9-NEXT: scratch_store_dword off, v0, s0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -1933,7 +1934,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; GFX10-NEXT: v_mov_b32_e32 v0, 13 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_movk_i32 s0, 0x3e84 +; GFX10-NEXT: s_movk_i32 s0, 0x3e80 +; GFX10-NEXT: s_add_i32 s0, s0, 4 ; GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -1945,10 +1947,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX942-LABEL: store_load_large_imm_offset_kernel: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: v_mov_b32_e32 v0, 13 +; GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v0, 15 -; GFX942-NEXT: s_movk_i32 s0, 0x3e84 
+; GFX942-NEXT: s_add_i32 s0, s0, 4 ; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -1958,7 +1961,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX11-LABEL: store_load_large_imm_offset_kernel: ; GFX11: ; %bb.0: ; %bb ; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; GFX11-NEXT: s_movk_i32 s0, 0x3e84 +; GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_add_i32 s0, s0, 4 ; GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -1986,8 +1991,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX9-NEXT: s_mov_b32 s0, 0 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) +; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2002,7 +2008,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15 -; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2014,10 +2021,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_kernel: ; UNALIGNED_GFX942: ; %bb.0: ; %bb ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2027,7 +2035,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX11-LABEL: store_load_large_imm_offset_kernel: ; UNALIGNED_GFX11: ; %bb.0: ; %bb ; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc ; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -2061,11 +2071,13 @@ define void @store_load_large_imm_offset_foo() { ; GFX9-LABEL: store_load_large_imm_offset_foo: ; GFX9: ; %bb.0: ; %bb ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; GFX9-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-NEXT: s_add_i32 s1, s32, s0 ; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-NEXT: s_waitcnt 
vmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, 15 -; GFX9-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX9-NEXT: s_add_i32 s0, s1, 4 ; GFX9-NEXT: scratch_store_dword off, v0, s0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2076,8 +2088,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10: ; %bb.0: ; %bb ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-NEXT: s_movk_i32 s0, 0x3e80 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX10-NEXT: s_add_i32 s1, s32, s0 +; GFX10-NEXT: s_add_i32 s0, s1, 4 ; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2089,11 +2103,13 @@ define void @store_load_large_imm_offset_foo() { ; GFX942-LABEL: store_load_large_imm_offset_foo: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; GFX942-NEXT: v_mov_b32_e32 v0, 13 +; GFX942-NEXT: s_add_i32 s1, s32, s0 ; GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v0, 15 -; GFX942-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX942-NEXT: s_add_i32 s0, s1, 4 ; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2104,7 +2120,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX11: ; %bb.0: ; %bb ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; GFX11-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_add_i32 s1, s32, s0 +; GFX11-NEXT: s_add_i32 s0, s1, 4 ; GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -2133,11 +2152,13 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX9-LABEL: store_load_large_imm_offset_foo: ; UNALIGNED_GFX9: ; %bb.0: ; %bb ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX9-NEXT: s_add_i32 s1, s32, s0 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2148,8 +2169,10 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX10: ; %bb.0: ; %bb ; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15 -; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX10-NEXT: s_add_i32 s1, s32, s0 +; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2161,11 +2184,13 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_foo: ; 
UNALIGNED_GFX942: ; %bb.0: ; %bb ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX942-NEXT: s_add_i32 s1, s32, s0 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2176,7 +2201,10 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX11: ; %bb.0: ; %bb ; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; UNALIGNED_GFX11-NEXT: s_add_i32 s1, s32, s0 +; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc ; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll index 2785b78..481a254 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll @@ -2243,36 +2243,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr ; ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat: ; GFX1250: ; %bb.0: ; %main_body +; GFX1250-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-NEXT: s_mov_b32 s1, exec_lo -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0 -; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0 -; GFX1250-NEXT: s_cbranch_execz .LBB51_3 +; GFX1250-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-NEXT: ; %bb.1: -; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1 -; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24 +; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v4, s1 -; GFX1250-NEXT: ds_load_b64 v[2:3], v4 -; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1] -; GFX1250-NEXT: .LBB51_2: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3] +; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3] -; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 
exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB51_2 -; GFX1250-NEXT: .LBB51_3: +; GFX1250-NEXT: .LBB51_2: ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2322,36 +2308,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3 ; ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush: ; GFX1250: ; %bb.0: ; %main_body +; GFX1250-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-NEXT: s_mov_b32 s1, exec_lo -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0 -; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0 -; GFX1250-NEXT: s_cbranch_execz .LBB52_3 +; GFX1250-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-NEXT: ; %bb.1: -; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1 -; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24 +; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v4, s1 -; GFX1250-NEXT: ds_load_b64 v[2:3], v4 -; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1] -; GFX1250-NEXT: .LBB52_2: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3] -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3] -; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB52_2 -; GFX1250-NEXT: .LBB52_3: +; GFX1250-NEXT: .LBB52_2: ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2401,36 +2373,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp ; ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe: ; GFX1250: ; %bb.0: ; %main_body +; GFX1250-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-NEXT: s_mov_b32 s1, exec_lo -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0 -; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0 -; GFX1250-NEXT: s_cbranch_execz .LBB53_3 +; GFX1250-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-NEXT: ; %bb.1: -; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1 -; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24 +; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v4, s1 -; GFX1250-NEXT: ds_load_b64 v[2:3], v4 -; GFX1250-NEXT: 
v_mul_f64_e32 v[0:1], 4.0, v[0:1] -; GFX1250-NEXT: .LBB53_2: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3] +; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3] -; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB53_2 -; GFX1250-NEXT: .LBB53_3: +; GFX1250-NEXT: .LBB53_2: ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2459,23 +2417,9 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v2, v0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5] +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB54_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll index 62f8f89..79a9291 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12,GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12,GFX1250 %s ; Natural mapping define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { @@ -99,26 +100,47 @@ define amdgpu_ps <2 x float> @raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vg ; GFX8-NEXT: $vgpr1 = COPY [[COPY9]] ; 
GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY 
$vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to <2 x float> ret <2 x float> %cast @@ -142,22 +164,39 @@ define amdgpu_ps void @raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgp ; GFX8-NEXT: BUFFER_ATOMIC_ADD_X2_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = 
COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -217,58 +256,111 @@ define amdgpu_ps float @raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: 
[[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: 
[[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 
= COPY $vgpr0 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; 
GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -328,57 +420,109 @@ define amdgpu_ps void @raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgp ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: 
successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = 
S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], 
[[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -400,21 +544,40 @@ define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], 
%subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll index 364ed62..9f1b7a6 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck --check-prefix=GFX1250 %s ; Natural mapping @@ -24,24 +25,43 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 
(%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], 
%subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -66,22 +86,39 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__ ; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], 
[[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -145,62 +182,119 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[COPY15]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, 
[[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[COPY15]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 
= COPY [[COPY6]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY15]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, 
$vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), 
align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY15]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -263,60 +357,115 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__ ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], 
[[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: 
[[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: 
[[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -341,24 +490,46 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; 
GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = 
V_ADD_U32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float @@ -395,33 +566,61 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, 
$sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], 
%subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double ret double %cast @@ -450,26 +649,47 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__ ; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], 
%subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -542,71 +762,137 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = 
COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = 
BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0 - ; GFX12-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], 
%subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0 + ; GFX1200-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; 
GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, 
implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0 + ; GFX1250-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double ret double %cast @@ -673,64 +959,123 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__ ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], 
implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; 
GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: 
%bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; 
GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -764,33 +1109,64 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: 
raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY8]], [[COPY10]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %voffset = add i32 %voffset.base, 4095 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll index 46ca43b..7003bb1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1250 %s ; FIXME: Test with SI when argument lowering not broken for f16 ; Natural mapping @@ -124,52 +125,99 @@ define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; 
GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: 
bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = 
V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -226,55 +274,105 @@ define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; 
GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: 
[[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -509,23 +607,41 @@ define amdgpu_ps <2 x float> 
@raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sg ; GFX8-NEXT: $vgpr1 = COPY [[COPY7]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64_align2 = 
BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %val = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <2 x float> %val } @@ -551,25 +667,45 @@ define amdgpu_ps <3 x float> @raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sg ; GFX8-NEXT: $vgpr2 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 ; - ; GFX12-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr2 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 + ; GFX1200-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2 + ; GFX1200-NEXT: $vgpr0 = 
COPY [[COPY6]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr2 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr2 = COPY [[COPY8]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 %val = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <3 x float> %val } @@ -597,27 +733,49 @@ define amdgpu_ps <4 x float> @raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sg ; GFX8-NEXT: $vgpr3 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 ; - ; GFX12-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr2 = COPY [[COPY8]] - ; GFX12-NEXT: $vgpr3 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, 
implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX1200-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr2 = COPY [[COPY8]] + ; GFX1200-NEXT: $vgpr3 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr2 = COPY [[COPY8]] + ; GFX1250-NEXT: $vgpr3 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 %val = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <4 x float> %val } @@ -715,23 +873,41 @@ define amdgpu_ps <4 x half> @raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgp ; GFX8-NEXT: $vgpr1 = COPY 
[[COPY7]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 
implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %val = call <4 x half> @llvm.amdgcn.raw.buffer.load.v4f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <4 x half> %val } @@ -929,52 +1105,99 @@ define amdgpu_ps half @raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffse ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), 
align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; 
GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: 
$exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call half @llvm.amdgcn.raw.buffer.load.f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret half %val } @@ -1028,52 +1251,99 @@ define amdgpu_ps float @raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffse ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; 
GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; 
GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i8 
@llvm.amdgcn.raw.buffer.load.i8(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %zext = zext i8 %val to i32 %cast = bitcast i32 %zext to float @@ -1194,20 +1464,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec + ; GFX1250-NEXT: 
[[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 16 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -1229,20 +1517,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; 
GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -1267,20 +1573,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4096, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4096, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4096 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -1522,54 +1846,103 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 - ; GFX12-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], 
implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 + ; GFX1200-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], 
implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 + ; GFX1250-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: 
[[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %soffset = add i32 %soffset.base, 5000 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -1627,52 +2000,102 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY 
[[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; 
GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec + ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: 
[[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 5000 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll index 3fbfb63..4784ac5 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll @@ -5,7 +5,8 @@ ; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910 ; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910 ; RUN: llc -global-isel -mcpu=gfx1100 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX11 -; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12 +; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1200 +; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1250 define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) { ; GFX67-LABEL: name: raw_buffer_load_i8_tfe @@ -110,27 +111,49 @@ define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspa ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i8_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; 
GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i8_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i8_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: 
[[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i8, i32 } @llvm.amdgcn.raw.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { i8, i32 } %res, 0 store i8 %data, ptr addrspace(1) %data_addr @@ -242,27 +265,49 @@ define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i16_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store 
(s16) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i16_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i16_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 
implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i16, i32 } @llvm.amdgcn.raw.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { i16, i32 } %res, 0 store i16 %data, ptr addrspace(1) %data_addr @@ -374,27 +419,49 @@ define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_f16_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_f16_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = 
BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f16_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { half, i32 } @llvm.amdgcn.raw.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { half, i32 } %res, 0 store half %data, ptr addrspace(1) %data_addr @@ -506,27 +573,49 @@ define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: 
[[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: 
[[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i32, i32 } @llvm.amdgcn.raw.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { i32, i32 } %res, 0 store i32 %data, ptr addrspace(1) %data_addr @@ -646,29 +735,53 @@ define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: 
S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v2i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 + ; 
GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <2 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <2 x i32>, i32 } %res, 0 store <2 x i32> %data, ptr addrspace(1) %data_addr @@ -788,29 +901,53 @@ define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v2f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], 
%subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <2 x float>, i32 
} @llvm.amdgcn.raw.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <2 x float>, i32 } %res, 0 store <2 x float> %data, ptr addrspace(1) %data_addr @@ -977,30 +1114,55 @@ define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v3i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: 
[[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <3 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <3 x i32>, i32 } %res, 0 
store <3 x i32> %data, ptr addrspace(1) %data_addr @@ -1167,30 +1329,55 @@ define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v3f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: 
[[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <3 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <3 x float>, i32 } %res, 0 store <3 x float> %data, ptr addrspace(1) %data_addr @@ -1318,31 +1505,57 @@ define amdgpu_ps void 
@raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v4i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: 
[[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <4 x i32>, i32 } 
@llvm.amdgcn.raw.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <4 x i32>, i32 } %res, 0 store <4 x i32> %data, ptr addrspace(1) %data_addr @@ -1470,31 +1683,57 @@ define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v4f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY 
$vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; 
GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <4 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <4 x float>, i32 } %res, 0 store <4 x float> %data, ptr addrspace(1) %data_addr diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll index 63ca7be..c365d57 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1250 %s ; FIXME: Test with SI when argument lowering not broken for f16 ; Natural mapping @@ -126,52 +127,99 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: 
[[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = 
V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead 
$scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -309,55 +357,105 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; 
GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit 
$exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: 
[[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -618,22 +716,39 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORDX2_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; 
GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -657,23 +772,41 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORDX3_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], 
%subreg.sub1, [[COPY6]], %subreg.sub2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -698,24 +831,43 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORDX4_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32 
- ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: 
[[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -876,22 +1028,39 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORDX2_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 + ; GFX1250: bb.1 (%ir-block.0): + 
; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -946,54 +1115,103 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; 
GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: 
[[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY 
[[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -1080,20 +1298,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; 
GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 16 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1115,20 +1351,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: 
[[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 4095 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1153,20 +1407,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[V_ADD_CO_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; 
GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 4096 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1256,20 +1528,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, 
addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 16 call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1291,20 +1581,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; 
GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 4095 call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1329,20 +1637,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[V_ADD_CO_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 4096 call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1400,52 +1726,102 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, 
$vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + 
; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: 
[[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 5000 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1501,51 +1877,97 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; 
GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: 
[[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: 
%bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 5000, i32 %soffset, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll index 75d6c59..484639a 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s ; Natural mapping define amdgpu_ps float 
@struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { @@ -22,23 +23,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: 
[[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -63,23 +82,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc_ ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: 
[[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -109,28 +146,51 @@ define amdgpu_ps <2 x float> @struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc_ ; GFX8-NEXT: $vgpr1 = COPY [[COPY10]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: 
[[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY10]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY10]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = 
REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY10]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to <2 x float> ret <2 x float> %cast @@ -156,24 +216,43 @@ define amdgpu_ps void @struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__ ; GFX8-NEXT: BUFFER_ATOMIC_ADD_X2_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = 
COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -236,61 +315,117 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - 
; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: 
[[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; 
GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: 
{{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -353,60 +488,115 @@ define amdgpu_ps void @struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__ ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: 
[[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: 
[[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = 
S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -431,23 +621,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, 
$vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; 
GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) %cast = bitcast i32 %ret to float ret float %cast diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll index c9d1227..7dab257 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s ; Natural mapping define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { @@ -25,26 +26,47 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sg ; GFX8-NEXT: $vgpr0 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 - 
; GFX12-NEXT: $vgpr0 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 
%vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -71,24 +93,43 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cm ; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; 
GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -155,65 +196,125 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vg ; GFX8-NEXT: $vgpr0 = COPY [[COPY17]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, 
[[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[COPY17]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY 
[[COPY]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY17]] + ; GFX1200-NEXT: 
SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), 
%bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY17]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -279,63 +380,121 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cm ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], 
%subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX1200-NEXT: 
[[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: 
[[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 
implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -362,26 +521,50 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sg ; GFX8-NEXT: $vgpr0 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], 
%subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY7]], [[COPY9]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY10]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float @@ -420,35 +603,65 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__s ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; 
GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; 
GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %ret = call i64 
@llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double ret double %cast @@ -479,28 +692,51 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cm ; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: 
[[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -576,74 +812,143 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__v ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: 
[[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 - ; GFX12-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], 
[[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY19:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0 - ; GFX12-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, 
[[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY19:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0 + ; GFX1200-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: 
[[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; 
GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY19:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0 + ; GFX1250-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double ret double %cast @@ -713,67 +1018,129 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cm ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY 
[[COPY9]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 - ; GFX12-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 
= COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE 
[[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; 
GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -809,35 +1176,68 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__s ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = 
REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 + ; GFX1200-NEXT: 
[[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY9]], [[COPY11]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %voffset = add i32 %voffset.base, 4095 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double diff --git 
a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll index 9b5e46b3..dbef90f 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s ; Natural mapping define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { @@ -21,22 +22,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], 
%subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -63,25 +81,45 @@ define amdgpu_ps <2 x float> @struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__ ; GFX8-NEXT: $vgpr1 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: 
struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %val = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <2 x float> %val } @@ -110,27 +148,49 @@ define amdgpu_ps <3 x float> @struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__ ; GFX8-NEXT: $vgpr2 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 ; - ; GFX12-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: 
$sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]] - ; GFX12-NEXT: $vgpr2 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 + ; GFX1200-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1200-NEXT: $vgpr2 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY 
$sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1250-NEXT: $vgpr2 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 %val = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <3 x float> %val } @@ -161,29 +221,53 @@ define amdgpu_ps <4 x float> @struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__ ; GFX8-NEXT: $vgpr3 = COPY [[COPY10]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 ; - ; GFX12-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]] - ; GFX12-NEXT: $vgpr2 = COPY [[COPY9]] - ; GFX12-NEXT: $vgpr3 = COPY [[COPY10]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX1200-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; 
GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1200-NEXT: $vgpr2 = COPY [[COPY9]] + ; GFX1200-NEXT: $vgpr3 = COPY [[COPY10]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1250-NEXT: $vgpr2 = COPY [[COPY9]] + ; GFX1250-NEXT: $vgpr3 = COPY [[COPY10]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, 
implicit $vgpr3 %val = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <4 x float> %val } @@ -208,23 +292,41 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: 
[[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -248,22 +350,42 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], 
[[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -287,22 +409,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64 + ; 
GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 64, i32 0) ret float %val } @@ -363,59 +502,113 @@ define amdgpu_ps float @struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; 
GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, 
$vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; 
GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: 
%bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -438,22 +631,39 @@ define amdgpu_ps float @struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %ext = zext i8 %val to i32 %cast = bitcast i32 %ext to float @@ -478,22 +688,39 @@ define amdgpu_ps float @struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; 
GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %ext = sext i8 %val to i32 %cast = bitcast i32 %ext to float @@ -519,23 +746,41 @@ define amdgpu_ps float @struct_buffer_load_i8_sext_wrong_width(<4 x i32> inreg % ; GFX8-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i8_sext_wrong_width - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN 
[[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec - ; GFX12-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i8_sext_wrong_width + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec + ; GFX1200-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i8_sext_wrong_width + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec + ; GFX1250-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %trunc = trunc i8 %val to i4 %ext = sext i4 %trunc to i32 @@ -561,22 +806,39 @@ define amdgpu_ps float @struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgp ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 
(%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: 
(dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %ext = zext i16 %val to i32 %cast = bitcast i32 %ext to float @@ -601,22 +863,39 @@ define amdgpu_ps float @struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgp ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: 
[[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %ext = sext i16 %val to i32 %cast = bitcast i32 %ext to float @@ -642,23 +921,41 @@ define amdgpu_ps float @struct_buffer_load_i16_sext_wrong_width(<4 x i32> inreg ; GFX8-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i16_sext_wrong_width - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec - ; GFX12-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i16_sext_wrong_width + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec + ; GFX1200-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i16_sext_wrong_width + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec + ; GFX1250-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %trunc = trunc i16 %val to i8 %ext = sext i8 %trunc to i32 @@ -685,22 +982,39 @@ define amdgpu_ps half @struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voff ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: 
liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call half @llvm.amdgcn.struct.buffer.load.f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret half %val } @@ -724,22 +1038,39 @@ define amdgpu_ps <2 x half> @struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__v ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY 
$sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call <2 x half> @llvm.amdgcn.struct.buffer.load.v2f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <2 x half> %val } @@ -772,25 +1103,45 @@ define amdgpu_ps <4 x half> @struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__v ; GFX8-NEXT: $vgpr1 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - 
; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY 
$vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %val = call <4 x half> @llvm.amdgcn.struct.buffer.load.v4f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <4 x half> %val } @@ -814,22 +1165,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit 
$exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1) ret float %val } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll index 674fe1c..39cce20 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll @@ -5,7 +5,8 @@ ; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910 ; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910 ; RUN: llc -global-isel -mcpu=gfx1100 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX11 -; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12 +; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1200 +; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1250 define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) { ; GFX67-LABEL: name: raw_buffer_load_i8_tfe @@ -114,29 +115,53 @@ define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspa ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i8_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: 
[[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i8_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i8_tfe + ; 
GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { i8, i32 } %res, 0 store i8 %data, ptr addrspace(1) %data_addr @@ -252,29 +277,53 @@ define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i16_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = 
BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i16_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i16_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: 
[[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i16, i32 } @llvm.amdgcn.struct.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { i16, i32 } %res, 0 store i16 %data, ptr addrspace(1) %data_addr @@ -390,29 +439,53 @@ define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_f16_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_f16_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY 
$sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f16_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) 
into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { half, i32 } @llvm.amdgcn.struct.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { half, i32 } %res, 0 store half %data, ptr addrspace(1) %data_addr @@ -528,29 +601,53 @@ define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: 
[[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i32, i32 } @llvm.amdgcn.struct.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { i32, i32 } %res, 0 store i32 %data, ptr addrspace(1) %data_addr @@ -674,31 +771,57 @@ define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: 
[[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v2i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, 
[[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <2 x i32>, i32 } %res, 0 store <2 x i32> %data, ptr addrspace(1) %data_addr @@ -822,31 +945,57 @@ define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, 
[[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v2f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into 
%ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <2 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <2 x float>, i32 } %res, 0 store <2 x float> %data, ptr addrspace(1) %data_addr @@ -1018,32 +1167,59 @@ define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = 
COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v3i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 + ; GFX1200-NEXT: 
GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <3 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <3 x i32>, i32 } %res, 0 store <3 x i32> %data, ptr addrspace(1) %data_addr @@ -1215,32 +1391,59 @@ define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: 
[[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v3f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 + ; 
GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <3 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <3 x float>, i32 } %res, 0 store <3 x float> %data, ptr addrspace(1) %data_addr @@ -1372,33 +1575,61 @@ define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 
- ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v4i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit 
$exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <4 x i32>, i32 } 
@llvm.amdgcn.struct.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <4 x i32>, i32 } %res, 0 store <4 x i32> %data, ptr addrspace(1) %data_addr @@ -1530,33 +1761,61 @@ define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v4f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: 
[[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1250-NEXT: 
[[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <4 x float>, i32 } %res, 0 store <4 x float> %data, ptr addrspace(1) %data_addr diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll index 8183d85..c9771b5 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx810 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s ; Natural mapping define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { @@ -21,22 +22,39 @@ define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex_ ; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: 
struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -61,24 +79,43 @@ define amdgpu_ps void @struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - 
; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x 
s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -104,25 +141,45 @@ define amdgpu_ps void @struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORDX3_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: 
name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -149,26 +206,47 @@ define amdgpu_ps void @struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORDX4_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 
0 + ; GFX1200-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -233,64 +311,123 @@ define amdgpu_ps void @struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vinde ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: 
name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: 
{{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; 
GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_128_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; 
GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -313,22 +450,39 @@ define amdgpu_ps void @struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__ ; GFX8-NEXT: BUFFER_STORE_BYTE_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; 
GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %val.trunc = trunc i32 %val to i8 call void @llvm.amdgcn.struct.buffer.store.i8(i8 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void @@ -352,22 
+506,39 @@ define amdgpu_ps void @struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex_ ; GFX8-NEXT: BUFFER_STORE_SHORT_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %val.trunc = trunc i32 %val to i16 call void @llvm.amdgcn.struct.buffer.store.i16(i16 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void @@ -391,22 +562,39 @@ define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex_ ; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, 
$vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1) ret void } @@ -429,22 +617,39 @@ define amdgpu_ps void @struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + 
; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -475,24 +680,43 @@ define amdgpu_ps void @struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: 
struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll new file mode 100644 index 0000000..5fc9f4a --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll @@ -0,0 +1,1486 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN:llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefix=GFX1250 %s + +define float @global_system_atomic_fadd_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_system_atomic_fadd_f32: +; 
GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic + ret float %result +} + +define float @global_one_as_atomic_fadd_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_one_as_atomic_fadd_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @global_system_atomic_fadd_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_system_atomic_fadd_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic + ret double %result +} + +define double @global_one_as_atomic_fadd_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_one_as_atomic_fadd_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define float @global_system_atomic_fmin_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_system_atomic_fmin_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val monotonic + ret float %result +} + +define float @global_one_as_atomic_fmin_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_one_as_atomic_fmin_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @global_system_atomic_fmin_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_system_atomic_fmin_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val monotonic + ret double %result +} + +define double @global_one_as_atomic_fmin_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: 
global_one_as_atomic_fmin_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define float @global_system_atomic_fmax_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_system_atomic_fmax_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val monotonic + ret float %result +} + +define float @global_one_as_atomic_fmax_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_one_as_atomic_fmax_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @global_system_atomic_fmax_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_system_atomic_fmax_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val monotonic + ret double %result +} + +define double @global_one_as_atomic_fmax_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_one_as_atomic_fmax_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define i32 @global_one_as_atomic_min_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_one_as_atomic_min_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @global_system_atomic_min_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_system_atomic_min_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @global_one_as_atomic_max_i32(ptr addrspace(1) %ptr, i32 %val) { +; 
GFX1250-LABEL: global_one_as_atomic_max_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @global_system_atomic_max_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_system_atomic_max_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @global_one_as_atomic_umin_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_one_as_atomic_umin_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @global_system_atomic_umin_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_system_atomic_umin_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @global_one_as_atomic_umax_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_one_as_atomic_umax_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @global_system_atomic_umax_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_system_atomic_umax_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %val monotonic + ret i32 %result +} + +define i64 @global_one_as_atomic_min_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_one_as_atomic_min_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @global_system_atomic_min_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_system_atomic_min_i64: +; GFX1250: ; %bb.0: +; 
GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @global_one_as_atomic_max_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_one_as_atomic_max_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @global_system_atomic_max_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_system_atomic_max_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @global_one_as_atomic_umin_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_one_as_atomic_umin_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @global_system_atomic_umin_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_system_atomic_umin_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @global_one_as_atomic_umax_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_one_as_atomic_umax_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @global_system_atomic_umax_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_system_atomic_umax_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %val monotonic + ret i64 %result +} + +define i16 @global_one_as_atomic_min_i16(ptr addrspace(1) %ptr, i16 %val) { +; GFX1250-LABEL: global_one_as_atomic_min_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; 
GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: global_load_b32 v5, v[0:1], off +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB28_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_min_i16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB28_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @global_one_as_atomic_umin_i16(ptr addrspace(1) %ptr, i16 %val) { +; GFX1250-LABEL: global_one_as_atomic_umin_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: global_load_b32 v5, v[0:1], off +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB29_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_min_u16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB29_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: 
v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @global_one_as_atomic_max_i16(ptr addrspace(1) %ptr, i16 %val) { +; GFX1250-LABEL: global_one_as_atomic_max_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: global_load_b32 v5, v[0:1], off +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB30_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_max_i16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB30_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @global_one_as_atomic_umax_i16(ptr addrspace(1) %ptr, i16 %val) { +; GFX1250-LABEL: global_one_as_atomic_umax_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: global_load_b32 v5, v[0:1], off +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB31_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_max_u16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN 
scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB31_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define float @flat_system_atomic_fadd_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_system_atomic_fadd_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr %ptr, float %val monotonic + ret float %result +} + +define float @flat_one_as_atomic_fadd_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_one_as_atomic_fadd_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @flat_system_atomic_fadd_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_system_atomic_fadd_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base +; GFX1250-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB34_6 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1 +; GFX1250-NEXT: s_cbranch_execz .LBB34_3 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global +; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB34_3: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1 +; GFX1250-NEXT: s_cbranch_execz .LBB34_5 +; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB34_5: ; %Flow1 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 
exec_lo, exec_lo, s1 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB34_6: ; %Flow2 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB34_8 +; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3] +; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr %ptr, double %val monotonic + ret double %result +} + +define double @flat_one_as_atomic_fadd_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_one_as_atomic_fadd_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base +; GFX1250-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB35_6 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1 +; GFX1250-NEXT: s_cbranch_execz .LBB35_3 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global +; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB35_3: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1 +; GFX1250-NEXT: s_cbranch_execz .LBB35_5 +; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB35_5: ; %Flow1 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB35_6: ; %Flow2 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB35_8 +; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3] +; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 
s[30:31] + %result = atomicrmw fadd ptr %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define float @flat_system_atomic_fmin_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_system_atomic_fmin_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr %ptr, float %val monotonic + ret float %result +} + +define float @flat_one_as_atomic_fmin_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_one_as_atomic_fmin_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @flat_system_atomic_fmin_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_system_atomic_fmin_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB38_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB38_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB38_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] +; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr %ptr, double %val monotonic + ret double %result +} + +define double @flat_one_as_atomic_fmin_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_one_as_atomic_fmin_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB39_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB39_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB39_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] +; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define float @flat_system_atomic_fmax_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_system_atomic_fmax_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr %ptr, float %val monotonic + ret float %result +} + +define float @flat_one_as_atomic_fmax_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_one_as_atomic_fmax_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @flat_system_atomic_fmax_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_system_atomic_fmax_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB42_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], 
v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB42_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB42_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr %ptr, double %val monotonic + ret double %result +} + +define double @flat_one_as_atomic_fmax_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_one_as_atomic_fmax_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB43_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB43_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB43_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define i32 
@flat_one_as_atomic_min_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_one_as_atomic_min_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @flat_system_atomic_min_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_system_atomic_min_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @flat_one_as_atomic_max_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_one_as_atomic_max_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @flat_system_atomic_max_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_system_atomic_max_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @flat_one_as_atomic_umin_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umin_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @flat_system_atomic_umin_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_system_atomic_umin_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @flat_one_as_atomic_umax_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umax_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @flat_system_atomic_umax_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_system_atomic_umax_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: 
s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i32 %val monotonic + ret i32 %result +} + +define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_one_as_atomic_min_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB52_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB52_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB52_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_min_i64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB52_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_system_atomic_min_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB53_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB53_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB53_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; 
GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_min_i64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB53_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_one_as_atomic_max_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB54_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB54_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB54_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_i64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB54_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_system_atomic_max_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB55_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB55_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 
s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB55_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_i64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB55_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umin_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB56_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB56_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB56_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_min_u64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB56_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_system_atomic_umin_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: 
s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB57_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB57_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB57_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_min_u64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB57_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umax_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB58_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB58_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB58_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_u64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB58_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_system_atomic_umax_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB59_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB59_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB59_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_u64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB59_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i64 %val monotonic + ret i64 %result +} + +define i16 @flat_one_as_atomic_min_i16(ptr %ptr, i16 %val) { +; GFX1250-LABEL: flat_one_as_atomic_min_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: flat_load_b32 v5, v[0:1] +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB60_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_min_i16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB60_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @flat_one_as_atomic_umin_i16(ptr %ptr, i16 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umin_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: flat_load_b32 v5, v[0:1] +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB61_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_min_u16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB61_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @flat_one_as_atomic_max_i16(ptr %ptr, i16 %val) { +; GFX1250-LABEL: flat_one_as_atomic_max_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: flat_load_b32 v5, v[0:1] +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB62_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_max_i16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB62_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @flat_one_as_atomic_umax_i16(ptr %ptr, i16 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umax_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: flat_load_b32 v5, v[0:1] +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB63_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_max_u16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB63_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll index 9979e83..30a7864 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll @@ -368,10 +368,7 @@ define amdgpu_ps float @test_clamp_v2bf16_s(<2 x bfloat> inreg %src) { define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) { ; GCN-LABEL: test_clamp_bf16_folding: ; GCN: ; %bb.0: -; GCN-NEXT: v_exp_bf16_e32 v0, v0 -; GCN-NEXT: v_nop -; GCN-NEXT: s_delay_alu instid0(TRANS32_DEP_1) -; GCN-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp +; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp ; GCN-NEXT: ; return to shader part epilog %exp = call bfloat @llvm.exp2.bf16(bfloat %src) %max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0) @@ -382,9 +379,7 @@ define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) { define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloat> %src1) { ; GCN-LABEL: test_clamp_v2bf16_folding: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp +; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1 clamp ; GCN-NEXT: 
; return to shader part epilog %mul = fmul <2 x bfloat> %src0, %src1 %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %mul, <2 x bfloat> <bfloat 0.0, bfloat 0.0>) @@ -396,9 +391,7 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) { ; GCN-LABEL: v_test_mul_add_v2bf16_vvv: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, v2, v3 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, v2, v4 +; GCN-NEXT: v_pk_fma_bf16 v2, v2, v3, v4 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm %mul = fmul contract <2 x bfloat> %a, %b @@ -410,9 +403,7 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfl define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) { ; GCN-LABEL: v_test_mul_add_v2bf16_vss: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, v2, s1 +; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, s1 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm %mul = fmul contract <2 x bfloat> %a, %b @@ -424,9 +415,9 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfl define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) { ; GCN-LABEL: v_test_mul_add_v2bf16_sss: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, s0, s1 +; GCN-NEXT: v_mov_b32_e32 v2, s2 ; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, v2, s2 +; GCN-NEXT: v_pk_fma_bf16 v2, s0, s1, v2 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm %mul = fmul contract <2 x bfloat> %a, %b @@ -438,9 +429,7 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfl define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) { ; GCN-LABEL: v_test_mul_add_v2bf16_vsc: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, v2, 0.5 op_sel_hi:[1,0] +; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0] ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm %mul = fmul contract <2 x bfloat> %a, %b @@ -452,9 +441,9 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfl define amdgpu_ps void @v_test_mul_add_v2bf16_vll(ptr addrspace(1) %out, <2 x bfloat> %a) { ; GCN-LABEL: v_test_mul_add_v2bf16_vll: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, 0x42c83f80, v2 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, 0x43484000, v2 +; GCN-NEXT: s_mov_b32 s0, 0x43484000 +; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GCN-NEXT: v_pk_fma_bf16 v2, 0x42c83f80, v2, s0 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm %mul = fmul contract <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0> diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 52e697c..505ddc8 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -24671,7 +24671,6 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) { ret <32 x bfloat> %op } - declare bfloat @llvm.maxnum.bf16(bfloat, bfloat) declare <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat>, <2 x bfloat>) 
declare <3 x bfloat> @llvm.maxnum.v3bf16(<3 x bfloat>, <3 x bfloat>) @@ -29673,7 +29672,6 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) { ret { bfloat, i16 } %op } - declare bfloat @llvm.log.bf16(bfloat) declare bfloat @llvm.log2.bf16(bfloat) declare bfloat @llvm.log10.bf16(bfloat) @@ -47043,18 +47041,10 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GFX8-LABEL: v_fmuladd_bf16: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1 -; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc -; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX8-NEXT: v_add_f32_e32 v0, v0, v1 +; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2 ; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 ; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 ; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1 @@ -47067,20 +47057,13 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GFX900-LABEL: v_fmuladd_bf16: ; GFX900: ; %bb.0: ; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX900-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX900-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX900-NEXT: v_fma_f32 v0, v0, v1, v2 ; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1 ; GFX900-NEXT: s_movk_i32 s4, 0x7fff ; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4 -; GFX900-NEXT: v_or_b32_e32 v3, 0x400000, v0 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc -; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX900-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4 ; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v0 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 ; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc @@ -47090,35 +47073,25 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GFX950-LABEL: v_fmuladd_bf16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX950-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX950-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v2, s0 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_fmuladd_bf16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v0 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc_lo -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: 
v_add_f32_e32 v0, v0, v1 -; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo +; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX10-NEXT: v_bfe_u32 v0, v2, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_add3_u32 v0, v0, v2, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; @@ -47126,55 +47099,38 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, 0 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l ; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l ; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l -; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v1, v3 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc_lo -; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v3, v1, v2 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v3, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v3 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_add_f32_e32 v0, v0, v3 -; GFX11TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo ; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_fmuladd_bf16: ; GFX11FAKE16: ; %bb.0: ; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX11FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1 +; 
GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v0, v1, v3 :: v_dual_lshlrev_b32 v1, 16, v2 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v0 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v2, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = call bfloat @llvm.fmuladd.bf16(bfloat %a, bfloat %b, bfloat %c) @@ -47235,39 +47191,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl ; GFX8-LABEL: v_fmuladd_v2bf16: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v0 -; GFX8-NEXT: v_mul_f32_e32 v3, v4, v3 -; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3 -; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 -; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX8-NEXT: v_add_f32_e32 v3, v3, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0 +; GFX8-NEXT: v_fma_f32 v3, v5, v4, v3 ; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1 -; GFX8-NEXT: s_movk_i32 s4, 0x7fff ; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4 -; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 +; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2 ; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc ; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s4, v1 -; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc -; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 -; GFX8-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 ; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1 ; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 @@ -47279,36 +47218,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl ; GFX900-LABEL: v_fmuladd_v2bf16: ; GFX900: ; %bb.0: ; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) 
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v0 -; GFX900-NEXT: v_mul_f32_e32 v3, v4, v3 -; GFX900-NEXT: v_bfe_u32 v4, v3, 16, 1 -; GFX900-NEXT: s_movk_i32 s4, 0x7fff -; GFX900-NEXT: v_add3_u32 v4, v4, v3, s4 -; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX900-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc -; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX900-NEXT: v_add_f32_e32 v3, v3, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2 +; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0 +; GFX900-NEXT: v_fma_f32 v3, v5, v4, v3 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX900-NEXT: v_bfe_u32 v4, v3, 16, 1 -; GFX900-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_fma_f32 v0, v0, v1, v2 ; GFX900-NEXT: v_add3_u32 v4, v4, v3, s4 ; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v3 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1 ; GFX900-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc ; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4 -; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc -; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 -; GFX900-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4 ; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v0 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 ; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc @@ -47319,150 +47244,94 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl ; GFX950-LABEL: v_fmuladd_v2bf16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v0 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v1 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_mul_f32_e32 v3, v4, v3 -; GFX950-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 -; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX950-NEXT: v_add_f32_e32 v3, v3, v4 -; GFX950-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 +; GFX950-NEXT: v_fmac_f32_e32 v3, v5, v4 +; GFX950-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v2, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_fmuladd_v2bf16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v0 -; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_mul_f32_e32 v3, v4, v3 -; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX10-NEXT: v_bfe_u32 v1, v3, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX10-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; 
GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v0 -; GFX10-NEXT: v_add3_u32 v1, v1, v3, 0x7fff ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX10-NEXT: v_add3_u32 v4, v4, v0, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 ; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo -; GFX10-NEXT: v_add_f32_e32 v1, v1, v3 ; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1 -; GFX10-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1 -; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo -; GFX10-NEXT: v_perm_b32 v0, v0, v1, 0x7060302 +; GFX10-NEXT: v_fmac_f32_e32 v3, v5, v4 +; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX10-NEXT: v_bfe_u32 v0, v3, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2 +; GFX10-NEXT: v_add3_u32 v0, v0, v3, 0x7fff +; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11TRUE16-LABEL: v_fmuladd_v2bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0 ; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0 -; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11TRUE16-NEXT: 
v_or_b32_e32 v4, 0x400000, v1 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX11TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v1, v0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4 +; GFX11TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v3, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 +; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_fmuladd_v2bf16: ; GFX11FAKE16: ; %bb.0: ; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0 ; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX11FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX11FAKE16-NEXT: 
v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX11FAKE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4 +; GFX11FAKE16-NEXT: v_bfe_u32 v1, v2, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v3, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 +; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) ret <2 x bfloat> %op @@ -47542,57 +47411,33 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl ; GFX8-LABEL: v_fmuladd_v3bf16: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5 ; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3 -; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc -; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX8-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1 -; GFX8-NEXT: s_movk_i32 s4, 0x7fff -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3 ; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0 -; GFX8-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3 
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 -; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; GFX8-NEXT: v_add_f32_e32 v3, v3, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v0 +; GFX8-NEXT: v_fma_f32 v3, v6, v5, v3 ; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff ; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 -; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4 ; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2 -; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc -; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 -; GFX8-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0 ; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2 ; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 @@ -47605,52 +47450,31 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl ; GFX900-LABEL: v_fmuladd_v3bf16: ; GFX900: ; %bb.0: ; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX900-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX900-NEXT: v_fma_f32 v1, v1, v3, v5 ; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1 ; GFX900-NEXT: s_movk_i32 s4, 0x7fff ; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4 -; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc -; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX900-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1 -; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4 ; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc -; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0 -; GFX900-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4 -; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v3 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc -; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; GFX900-NEXT: v_add_f32_e32 v3, v3, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v2 +; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v0 +; GFX900-NEXT: v_fma_f32 v3, v6, v5, v3 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX900-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX900-NEXT: v_fma_f32 v0, v0, v2, 
v4 ; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4 ; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v3 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1 ; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc ; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4 -; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc -; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 -; GFX900-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1 -; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4 ; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 ; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc @@ -47662,211 +47486,132 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl ; GFX950-LABEL: v_fmuladd_v3bf16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX950-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX950-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v5, s0 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v4 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v0 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX950-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 -; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; GFX950-NEXT: v_add_f32_e32 v3, v3, v5 -; GFX950-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 +; GFX950-NEXT: v_fmac_f32_e32 v3, v6, v5 +; GFX950-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v4, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_fmuladd_v3bf16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v2 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v0 -; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX10-NEXT: v_mul_f32_e32 v3, v7, v6 -; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1 -; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v3 -; GFX10-NEXT: v_add3_u32 v7, v7, v3, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v0 -; 
GFX10-NEXT: v_add3_u32 v8, v8, v0, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v4 ; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_add_f32_e32 v1, v1, v3 ; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX10-NEXT: v_add_f32_e32 v2, v2, v5 ; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX10-NEXT: v_add_f32_e32 v0, v0, v4 -; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v2 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1 -; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0 -; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo -; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo -; GFX10-NEXT: v_alignbit_b32 v1, s4, v1, 16 +; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7 +; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX10-NEXT: v_bfe_u32 v1, v6, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_bfe_u32 v0, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1 +; GFX10-NEXT: v_add3_u32 v1, v1, v6, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v5 +; GFX10-NEXT: v_add3_u32 v0, v0, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v0, v8, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v2, v1, 0x7060302 +; GFX10-NEXT: v_alignbit_b32 v1, s4, v3, 16 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11TRUE16-LABEL: v_fmuladd_v3bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v0 -; GFX11TRUE16-NEXT: v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX11TRUE16-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; 
GFX11TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1 -; GFX11TRUE16-NEXT: v_bfe_u32 v7, v0, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v4, v0, v2 :: v_dual_fmac_f32 v5, v1, v3 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v6, v8, v7 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v3, v5, 16, 1 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v6, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6 ; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff -; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff -; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 -; GFX11TRUE16-NEXT: v_add3_u32 v8, v9, v1, 0x7fff -; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v10, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_add_f32_e32 v2, v2, v3 -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v1, v8, v6 :: v_dual_and_b32 v0, 0xffff0000, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX11TRUE16-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_and_b32 v1, 0xffff0000, v1 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_add_f32_e32 v1, v1, v5 -; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 -; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_add3_u32 v5, v6, v1, 0x7fff -; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v7, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v6, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v3, v5, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 
vcc_lo, v5, v5 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v2, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc_lo -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_fmuladd_v3bf16: ; GFX11FAKE16: ; %bb.0: ; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v0 -; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_lshlrev_b32 v3, 16, v3 -; GFX11FAKE16-NEXT: v_bfe_u32 v8, v0, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_add3_u32 v8, v8, v0, 0x7fff -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX11FAKE16-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX11FAKE16-NEXT: v_mul_f32_e32 v3, v7, v6 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX11FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 -; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0 ; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo -; GFX11FAKE16-NEXT: v_add_f32_e32 v2, v2, v5 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v2 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v3 -; GFX11FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1 -; GFX11FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0 -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX11FAKE16-NEXT: v_add3_u32 v5, v5, v0, 
0x7fff -; GFX11FAKE16-NEXT: v_add3_u32 v4, v4, v1, 0x7fff -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v2, 0x7060302 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_alignbit_b32 v1, s0, v1, 16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v5, 16, v5 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX11FAKE16-NEXT: v_bfe_u32 v1, v6, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v5, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v6, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v5, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v0, v8, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v1, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_alignbit_b32 v1, s0, v3, 16 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = call <3 x bfloat> @llvm.fmuladd.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat> %c) ret <3 x bfloat> %op @@ -47966,75 +47711,43 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl ; GFX8-LABEL: v_fmuladd_v4bf16: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v1 -; GFX8-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v6 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7 -; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 -; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc -; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v5 -; GFX8-NEXT: v_add_f32_e32 v6, v6, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v3 +; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v1 +; GFX8-NEXT: v_fma_f32 v6, v8, v7, v6 ; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1 -; GFX8-NEXT: s_movk_i32 s4, 0x7fff ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v6 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; GFX8-NEXT: v_and_b32_e32 v1, 
0xffff0000, v1 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7 -; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7 +; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5 ; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff ; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v1 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 -; GFX8-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3 ; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0 -; GFX8-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; GFX8-NEXT: v_add_f32_e32 v3, v3, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v0 +; GFX8-NEXT: v_fma_f32 v3, v7, v5, v3 ; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1 ; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 -; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4 ; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2 -; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc -; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 -; GFX8-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0 ; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2 ; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 @@ -48048,68 +47761,40 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl ; GFX900-LABEL: v_fmuladd_v4bf16: ; GFX900: ; %bb.0: ; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v3 -; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v1 -; GFX900-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX900-NEXT: v_bfe_u32 v7, v6, 16, 1 -; GFX900-NEXT: s_movk_i32 s4, 0x7fff -; GFX900-NEXT: v_add3_u32 v7, v7, v6, s4 -; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v6 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 -; GFX900-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc -; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v5 -; GFX900-NEXT: v_add_f32_e32 v6, v6, v7 +; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; 
GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v3 +; GFX900-NEXT: v_lshlrev_b32_e32 v8, 16, v1 +; GFX900-NEXT: v_fma_f32 v6, v8, v7, v6 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX900-NEXT: v_bfe_u32 v7, v6, 16, 1 -; GFX900-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_fma_f32 v1, v1, v3, v5 ; GFX900-NEXT: v_add3_u32 v7, v7, v6, s4 ; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v6 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1 ; GFX900-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc ; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4 -; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v1 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc -; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 -; GFX900-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1 -; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4 ; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc -; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0 -; GFX900-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4 -; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v3 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc -; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; GFX900-NEXT: v_add_f32_e32 v3, v3, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v2 +; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v0 +; GFX900-NEXT: v_fma_f32 v3, v7, v5, v3 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX900-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX900-NEXT: v_fma_f32 v0, v0, v2, v4 ; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4 ; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1 ; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc ; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4 -; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc -; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 -; GFX900-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1 -; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4 ; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 ; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc @@ -48121,264 +47806,162 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl ; GFX950-LABEL: v_fmuladd_v4bf16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v3 -; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v1 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v3 +; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v1 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX950-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX950-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v5 -; GFX950-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX950-NEXT: v_fmac_f32_e32 v6, v8, v7 +; GFX950-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v7, v3 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX950-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v6, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 -; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; GFX950-NEXT: v_add_f32_e32 v6, v6, v7 -; GFX950-NEXT: v_add_f32_e32 v3, v3, v5 -; GFX950-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, v6 +; GFX950-NEXT: v_fmac_f32_e32 v3, v0, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v3, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v5, v6 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_fmuladd_v4bf16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v1 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v0 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v4 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v5 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX10-NEXT: v_bfe_u32 v10, v6, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6 -; GFX10-NEXT: v_mul_f32_e32 v7, v9, v7 -; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX10-NEXT: v_add3_u32 v10, v10, v6, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX10-NEXT: v_fmac_f32_e32 v7, v9, v8 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v6 +; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX10-NEXT: v_add3_u32 v0, v10, v6, 0x7fff +; GFX10-NEXT: v_bfe_u32 v2, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v3, v7, 16, 1 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX10-NEXT: v_bfe_u32 v9, v7, 16, 1 -; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX10-NEXT: v_bfe_u32 v11, v0, 16, 1 -; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v7 -; GFX10-NEXT: v_add3_u32 v9, v9, v7, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v0 -; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; 
GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo +; GFX10-NEXT: v_bfe_u32 v8, v4, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v2, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v2, v3, v7, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v7 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 -; GFX10-NEXT: v_add3_u32 v11, v11, v0, 0x7fff -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; GFX10-NEXT: v_add_f32_e32 v3, v3, v8 -; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v10, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4 -; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo -; GFX10-NEXT: v_add_f32_e32 v1, v1, v5 -; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX10-NEXT: v_add_f32_e32 v2, v2, v6 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_bfe_u32 v6, v1, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1 -; GFX10-NEXT: v_add_f32_e32 v0, v0, v4 -; GFX10-NEXT: v_add3_u32 v4, v7, v3, 0x7fff -; GFX10-NEXT: v_bfe_u32 v7, v2, 16, 1 -; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1 -; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo -; GFX10-NEXT: v_add3_u32 v4, v6, v1, 0x7fff -; GFX10-NEXT: v_add3_u32 v5, v7, v2, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v2 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo -; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302 +; GFX10-NEXT: v_add3_u32 v6, v8, v4, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v0, v9, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v3, v2, 0x7060302 +; GFX10-NEXT: v_perm_b32 v1, v4, v1, 0x7060302 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11TRUE16-LABEL: v_fmuladd_v4bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3 -; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v0 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_lshlrev_b32 v3, 16, v3 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v1 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v5 -; GFX11TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, 
v5 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v2 -; GFX11TRUE16-NEXT: v_dual_mul_f32 v3, v9, v7 :: v_dual_lshlrev_b32 v2, 16, v2 -; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6 -; GFX11TRUE16-NEXT: v_add3_u32 v9, v10, v6, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX11TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v9, v7, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_bfe_u32 v9, v0, 16, 1 -; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v0 -; GFX11TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX11TRUE16-NEXT: v_add3_u32 v9, v9, v0, 0x7fff -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v9, v11 :: v_dual_and_b32 v1, 0xffff0000, v1 -; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v0, 0xffff0000, v0 -; GFX11TRUE16-NEXT: v_add_f32_e32 v2, v6, v8 -; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_bfe_u32 v3, v5, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v10, v8 +; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v1, v0, v2 +; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v5, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v7, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v7 +; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v7, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v4, v9, v6, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1 ; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1 -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v7, v6 :: v_dual_lshlrev_b32 v6, 16, v4 -; GFX11TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 -; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX11TRUE16-NEXT: v_add3_u32 v5, v5, v1, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_dual_add_f32 v0, v0, v6 :: v_dual_and_b32 v3, 0xffff0000, v3 -; GFX11TRUE16-NEXT: v_add3_u32 v6, v7, v2, 
0x7fff -; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11TRUE16-NEXT: v_add3_u32 v5, v9, v1, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc_lo ; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_add_f32_e32 v3, v3, v4 -; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0 ; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v10, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v7, vcc_lo -; GFX11TRUE16-NEXT: v_bfe_u32 v9, v3, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0xffff, v1, v2 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_add3_u32 v5, v9, v3, 0x7fff -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v8, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1 +; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0xffff, v3, v2 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_fmuladd_v4bf16: ; GFX11FAKE16: ; %bb.0: ; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v9, 16, v0 ; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v1 -; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v3 ; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_and_b32 v5, 0xffff0000, v5 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2 -; GFX11FAKE16-NEXT: v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v6 ; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX11FAKE16-NEXT: v_mul_f32_e32 v7, v9, v7 -; GFX11FAKE16-NEXT: v_add3_u32 v10, v10, v6, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1 -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v10, v3 :: v_dual_mul_f32 v0, v0, v2 -; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v7 -; GFX11FAKE16-NEXT: v_add3_u32 v9, v9, v7, 0x7fff -; GFX11FAKE16-NEXT: v_bfe_u32 v11, v0, 16, 1 -; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_add3_u32 v11, v11, v0, 0x7fff -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v1, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v10, v6, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v9, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v2, v5, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_bfe_u32 v3, v7, 16, 1 ; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 -; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v2, v9, v10 :: v_dual_and_b32 v1, 0xffff0000, v1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1 -; GFX11FAKE16-NEXT: v_add_f32_e32 v2, v2, v6 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX11FAKE16-NEXT: v_bfe_u32 v6, v1, 16, 1 -; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v4 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_add_f32_e32 v3, v3, v8 -; GFX11FAKE16-NEXT: v_bfe_u32 v8, v0, 16, 1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11FAKE16-NEXT: v_add3_u32 v4, v7, v3, 0x7fff -; GFX11FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: 
v_cndmask_b32_e32 v3, v4, v5, vcc_lo -; GFX11FAKE16-NEXT: v_add3_u32 v4, v6, v1, 0x7fff -; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v2, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11FAKE16-NEXT: v_add3_u32 v7, v8, v0, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v2, 0x7060302 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo -; GFX11FAKE16-NEXT: v_perm_b32 v1, v1, v3, 0x7060302 +; GFX11FAKE16-NEXT: v_add3_u32 v6, v8, v4, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v3, v7, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v0, v9, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v0, v3, v2, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c) ret <4 x bfloat> %op diff --git a/llvm/test/CodeGen/AMDGPU/empty-text.ll b/llvm/test/CodeGen/AMDGPU/empty-text.ll new file mode 100644 index 0000000..8aa8600 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/empty-text.ll @@ -0,0 +1,9 @@ +; Test that there is no s_code_end padding if .text is otherwise empty. 
+ +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 < %s | FileCheck %s --check-prefixes=GCN + +@globalVar = global i32 37 + +declare amdgpu_ps void @funcDecl() + +; GCN-NOT: .fill diff --git a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll index f58cb84..839d0ba 100644 --- a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll +++ b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll @@ -38,11 +38,11 @@ define hidden void @copy(ptr noundef %va) { ; CHECK-NEXT: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr ; CHECK-NEXT: %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr ; CHECK-NEXT: store ptr %va, ptr addrspace(5) %va.addr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp.ascast, ptr %va.addr.ascast, i32 8, i1 false) ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %cp, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp) ; CHECK-NEXT: ret void ; entry: @@ -51,43 +51,43 @@ entry: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr store ptr %va, ptr addrspace(5) %va.addr, align 8 - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp) call void @llvm.va_copy.p0(ptr %cp.ascast, ptr nonnull %va.addr.ascast) %0 = load ptr, ptr addrspace(5) %cp, align 8 call void @valist(ptr noundef %0) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp) ret void } -declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) nocapture) +declare void @llvm.lifetime.start.p5(ptr addrspace(5) nocapture) declare void @llvm.va_copy.p0(ptr, ptr) declare hidden void @valist(ptr noundef) -declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) nocapture) +declare void @llvm.lifetime.end.p5(ptr addrspace(5) nocapture) define hidden void @start_once(...) { ; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %s = alloca ptr, align 8, addrspace(5) ; CHECK-NEXT: %s.ascast = addrspacecast ptr addrspace(5) %s to ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s) ; CHECK-NEXT: store ptr %varargs, ptr %s.ascast, align 8 ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s) ; CHECK-NEXT: ret void ; entry: %s = alloca ptr, align 8, addrspace(5) %s.ascast = addrspacecast ptr addrspace(5) %s to ptr - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s) call void @llvm.va_start.p0(ptr %s.ascast) %0 = load ptr, ptr addrspace(5) %s, align 8 call void @valist(ptr noundef %0) call void @llvm.va_end.p0(ptr %s.ascast) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %s) ret void } @@ -102,16 +102,16 @@ define hidden void @start_twice(...) 
{ ; CHECK-NEXT: %s1 = alloca ptr, align 8, addrspace(5) ; CHECK-NEXT: %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr ; CHECK-NEXT: %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1) ; CHECK-NEXT: store ptr %varargs, ptr %s0.ascast, align 8 ; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s0, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %0) ; CHECK-NEXT: store ptr %varargs, ptr %s1.ascast, align 8 ; CHECK-NEXT: %1 = load ptr, ptr addrspace(5) %s1, align 8 ; CHECK-NEXT: call void @valist(ptr noundef %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0) ; CHECK-NEXT: ret void ; entry: @@ -119,8 +119,8 @@ entry: %s1 = alloca ptr, align 8, addrspace(5) %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0) - call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0) + call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1) call void @llvm.va_start.p0(ptr %s0.ascast) %0 = load ptr, ptr addrspace(5) %s0, align 8 call void @valist(ptr noundef %0) @@ -129,8 +129,8 @@ entry: %1 = load ptr, ptr addrspace(5) %s1, align 8 call void @valist(ptr noundef %1) call void @llvm.va_end.p0(ptr %s1.ascast) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1) - call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1) + call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0) ret void } @@ -138,12 +138,12 @@ define hidden void @single_i32(i32 noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -157,12 +157,12 @@ define hidden void @single_double(double noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr addrspace(5) 
%0, align 8 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -174,12 +174,12 @@ define hidden void @single_v4f32(<4 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 16, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 16, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -191,12 +191,12 @@ define hidden void @single_v8f32(<8 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -208,12 +208,12 @@ define hidden void @single_v16f32(<16 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 64, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 64, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -225,12 +225,12 @@ define hidden void @single_v32f32(<32 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 128, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr 
inbounds nuw %single_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128 ; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 128, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -242,14 +242,14 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store double %y, ptr addrspace(5) %1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -261,14 +261,14 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -286,14 +286,14 @@ define hidden void @i32_libcS(i32 noundef %x, i8 %y.coerce0, i16 %y.coerce1, i32 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %y.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %y.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %y.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) 
%1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -317,14 +317,14 @@ define hidden void @libcS_i32(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %0, align 8 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -342,14 +342,14 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <4 x float> %y, ptr addrspace(5) %1, align 16 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -361,14 +361,14 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void 
@vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -380,14 +380,14 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <8 x float> %y, ptr addrspace(5) %1, align 32 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -399,14 +399,14 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -418,14 +418,14 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <16 x float> %y, ptr addrspace(5) %1, align 64 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ 
-437,14 +437,14 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -456,14 +456,14 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store <32 x float> %y, ptr addrspace(5) %1, align 128 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -475,14 +475,14 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 4, addrspace(5) -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void @vararg(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -495,12 +495,12 @@ define hidden void @fptr_single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, align 4, addrspace(5) ; CHECK-NEXT: %0 = load 
volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr addrspace(5) %1, align 4 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void %0(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -520,12 +520,12 @@ define hidden void @fptr_libcS(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i6 ; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3 ; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4 ; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5 -; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8 ; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr ; CHECK-NEXT: call void %0(ptr %2) -; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll index 7d36c9f..004d3c0 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll @@ -284,6 +284,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -329,6 +330,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo +; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -382,6 +384,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -430,6 +433,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: 
v_subrev_nc_u32_e32 v0, s1, v6 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo +; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll index b25d9b2..fc88839 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll @@ -3621,7 +3621,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX9-NEXT: s_mov_b32 s0, 0 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: s_movk_i32 s0, 0x3004 +; GFX9-NEXT: s_movk_i32 s0, 0x3000 +; GFX9-NEXT: s_add_i32 s0, s0, 4 ; GFX9-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -3637,7 +3638,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; GFX10-NEXT: v_mov_b32_e32 v0, 13 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_movk_i32 s0, 0x3804 +; GFX10-NEXT: s_movk_i32 s0, 0x3800 +; GFX10-NEXT: s_add_i32 s0, s0, 4 ; GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3682,7 +3684,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX9-PAL-NEXT: s_addc_u32 flat_scratch_hi, s13, 0 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) -; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3004 +; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000 +; GFX9-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) @@ -3716,8 +3719,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX1010-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 ; GFX1010-PAL-NEXT: v_mov_b32_e32 v0, 13 ; GFX1010-PAL-NEXT: v_mov_b32_e32 v1, 15 +; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3800 ; GFX1010-PAL-NEXT: s_mov_b32 s1, 0 -; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3804 +; GFX1010-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX1010-PAL-NEXT: scratch_store_dword off, v0, s1 offset:4 ; GFX1010-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX1010-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3739,7 +3743,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX1030-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 ; GFX1030-PAL-NEXT: v_mov_b32_e32 v0, 13 ; GFX1030-PAL-NEXT: v_mov_b32_e32 v1, 15 -; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3804 +; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3800 +; GFX1030-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX1030-PAL-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX1030-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX1030-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3785,10 +3790,12 @@ define void @store_load_large_imm_offset_foo() { ; GFX9-LABEL: store_load_large_imm_offset_foo: ; GFX9: ; %bb.0: ; %bb ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3000 ; GFX9-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-NEXT: s_add_i32 s1, s32, s0 ; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: s_add_i32 s0, s32, 0x3004 +; GFX9-NEXT: s_add_i32 s0, s1, 4 ; GFX9-NEXT: v_mov_b32_e32 
v0, 15 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -3800,8 +3807,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10: ; %bb.0: ; %bb ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-NEXT: s_movk_i32 s0, 0x3800 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_add_i32 s0, s32, 0x3804 +; GFX10-NEXT: s_add_i32 s1, s32, s0 +; GFX10-NEXT: s_add_i32 s0, s1, 4 ; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3843,10 +3852,12 @@ define void @store_load_large_imm_offset_foo() { ; GFX9-PAL-LABEL: store_load_large_imm_offset_foo: ; GFX9-PAL: ; %bb.0: ; %bb ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-PAL-NEXT: s_add_i32 s1, s32, s0 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) -; GFX9-PAL-NEXT: s_add_i32 s0, s32, 0x3004 +; GFX9-PAL-NEXT: s_add_i32 s0, s1, 4 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) @@ -3872,8 +3883,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10-PAL: ; %bb.0: ; %bb ; GFX10-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-PAL-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-PAL-NEXT: s_movk_i32 s0, 0x3800 ; GFX10-PAL-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-PAL-NEXT: s_add_i32 s0, s32, 0x3804 +; GFX10-PAL-NEXT: s_add_i32 s1, s32, s0 +; GFX10-PAL-NEXT: s_add_i32 s0, s1, 4 ; GFX10-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll index 1b092b2..5674ae3 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll @@ -349,29 +349,24 @@ define i32 @select_fneg_xor_select_i32(i1 %cond0, i1 %cond1, i32 %arg0, i32 %arg ; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-NEXT: v_and_b32_e32 v0, 1, v0 -; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 ; GCN-NEXT: v_and_b32_e32 v1, 1, v1 -; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc -; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v0 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, v3, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GCN-NEXT: v_cndmask_b32_e64 v0, v0, -v0, vcc ; GCN-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: select_fneg_xor_select_i32: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 ; GFX11-NEXT: v_and_b32_e32 v1, 1, v1 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, v3, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 -; GFX11-NEXT: v_xor_b32_e32 v2, 0x80000000, v0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) 
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, -v0, vcc_lo ; GFX11-NEXT: s_setpc_b64 s[30:31] %fneg0 = xor i32 %arg0, -2147483648 %select0 = select i1 %cond0, i32 %arg1, i32 %fneg0 @@ -550,31 +545,25 @@ define i64 @select_fneg_xor_select_i64(i1 %cond0, i1 %cond1, i64 %arg0, i64 %arg ; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-NEXT: v_and_b32_e32 v0, 1, v0 -; GCN-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 ; GCN-NEXT: v_and_b32_e32 v1, 1, v1 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 ; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc -; GCN-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc -; GCN-NEXT: v_xor_b32_e32 v3, 0x80000000, v2 +; GCN-NEXT: v_cndmask_b32_e64 v2, -v3, v5, vcc ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 -; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc +; GCN-NEXT: v_cndmask_b32_e64 v1, v2, -v2, vcc ; GCN-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: select_fneg_xor_select_i64: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_and_b32_e32 v0, 1, v0 -; GFX11-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; GFX11-NEXT: v_and_b32_e32 v1, 1, v1 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0 -; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_and_b32 v1, 1, v1 +; GFX11-NEXT: v_cndmask_b32_e64 v2, -v3, v5, vcc_lo ; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 -; GFX11-NEXT: v_xor_b32_e32 v3, 0x80000000, v2 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, -v2, vcc_lo ; GFX11-NEXT: s_setpc_b64 s[30:31] %fneg0 = xor i64 %arg0, 9223372036854775808 %select0 = select i1 %cond0, i64 %arg1, i64 %fneg0 diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir index 7fad2f4..a88b1ec 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir @@ -75,7 +75,8 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_materializedconst_0 - ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, 256, implicit-def $scc + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 256 + ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]], implicit-def $scc ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:sreg_32 = S_MOV_B32 %stack.0 diff --git a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir index cc43142..2f2d727 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir @@ -46,7 +46,8 @@ body: | %2:sreg_32 = S_LSHL2_ADD_U32 %0, %1, implicit-def $scc ... 
# GCN-LABEL: name: test_frameindex{{$}} -# GCN: %1:sreg_32 = S_ADD_I32 %stack.0, 70 +# GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 70 +# GCN-NEXT: %1:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]] --- name: test_frameindex tracksRegLiveness: true diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll index f9a24fe..0cb2b0b 100644 --- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll +++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll @@ -2102,23 +2102,10 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret(ptr addrspace(3) %ptr, do ; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x24 ; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v2, s2 -; GFX1250-NEXT: s_mov_b32 s2, 0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: .LBB51_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: v_mov_b32_e32 v2, s2 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1] -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s2, vcc_lo, s2 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2 -; GFX1250-NEXT: s_cbranch_execnz .LBB51_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX1250-NEXT: s_endpgm main_body: %ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0) @@ -2148,24 +2135,9 @@ define double @local_atomic_fadd_f64_rtn(ptr addrspace(3) %ptr, double %data) { ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0 -; GFX1250-NEXT: v_mov_b32_e32 v4, v1 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB52_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7] +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB52_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0) @@ -2197,24 +2169,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat: ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0 -; GFX1250-NEXT: s_mov_b32 s0, 0 
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: .LBB53_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1] -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB53_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2246,24 +2205,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3 ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush: ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1] +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB54_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2295,24 +2241,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe: ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: .LBB55_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1] +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB55_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst @@ -2341,23 
+2274,9 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v2, v0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB56_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5] +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB56_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2387,24 +2306,9 @@ define double @local_atomic_fadd_f64_rtn_ieee_unsafe(ptr addrspace(3) %ptr, doub ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0 -; GFX1250-NEXT: v_mov_b32_e32 v4, v1 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB57_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7] +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB57_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0) @@ -2434,24 +2338,9 @@ define double @local_atomic_fadd_f64_rtn_ieee_safe(ptr addrspace(3) %ptr, double ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0 -; GFX1250-NEXT: v_mov_b32_e32 v4, v1 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB58_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7] +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: 
v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB58_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0) diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll index 15cda62..f2fe61f 100644 --- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll +++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll @@ -360,7 +360,8 @@ entry: ; s_add_i32. ; GCN-LABEL: {{^}}fi_sop2_s_add_u32_literal_error: -; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0, 0x2010 +; GCN: s_movk_i32 [[S_MOVK_I32_:s[0-9]+]], 0x1000 +; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0x1010, [[S_MOVK_I32_]] ; GCN: s_addc_u32 [[ADD_HI:s[0-9]+]], s{{[0-9]+}}, 0 define amdgpu_kernel void @fi_sop2_s_add_u32_literal_error() #0 { entry: diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll index 3a898a9..f0db321 100644 --- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll +++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll @@ -244,8 +244,9 @@ define i32 @test_v64i32_load_store(ptr addrspace(1) %ptr, i32 %idx, ptr addrspac ; GCN-GISEL-NEXT: global_load_b128 v[60:63], v[0:1], off offset:16 ; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[0:1], off offset:240 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 -; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE ; 16-byte Folded Spill -; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU ; 16-byte Folded Reload +; GCN-GISEL-NEXT: s_clause 0x1 +; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE +; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 ; GCN-GISEL-NEXT: s_clause 0xe ; GCN-GISEL-NEXT: global_store_b128 v[46:47], v[0:3], off offset:32 diff --git a/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir b/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir index 8007597..492753b 100644 --- a/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir +++ b/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir @@ -1,6 +1,507 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12 -# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12,GFX1200 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12,GFX1250 + +--- +name: long_clause +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; GFX1200-LABEL: name: long_clause + ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit-def 
$vgpr5, implicit-def $vgpr5_lo16, implicit-def $vgpr5_hi16, implicit-def $vgpr6, implicit-def $vgpr6_lo16, implicit-def $vgpr6_hi16, implicit-def $vgpr7, implicit-def $vgpr7_lo16, implicit-def $vgpr7_hi16, implicit-def $vgpr8, implicit-def $vgpr8_lo16, implicit-def $vgpr8_hi16, implicit-def $vgpr9, implicit-def $vgpr9_lo16, implicit-def $vgpr9_hi16, implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr13, implicit-def $vgpr13_lo16, implicit-def $vgpr13_hi16, implicit-def $vgpr14, implicit-def $vgpr14_lo16, implicit-def $vgpr14_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit-def $vgpr17, implicit-def $vgpr17_lo16, implicit-def $vgpr17_hi16, implicit-def $vgpr18, implicit-def $vgpr18_lo16, implicit-def $vgpr18_hi16, implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit-def $vgpr21, implicit-def $vgpr21_lo16, implicit-def $vgpr21_hi16, implicit-def $vgpr22, implicit-def $vgpr22_lo16, implicit-def $vgpr22_hi16, implicit-def $vgpr23, implicit-def $vgpr23_lo16, implicit-def $vgpr23_hi16, implicit-def $vgpr24, implicit-def $vgpr24_lo16, implicit-def $vgpr24_hi16, implicit-def $vgpr25, implicit-def $vgpr25_lo16, implicit-def $vgpr25_hi16, implicit-def $vgpr26, implicit-def $vgpr26_lo16, implicit-def $vgpr26_hi16, implicit-def $vgpr27, implicit-def $vgpr27_lo16, implicit-def $vgpr27_hi16, implicit-def $vgpr28, implicit-def $vgpr28_lo16, implicit-def $vgpr28_hi16, implicit-def $vgpr29, implicit-def $vgpr29_lo16, implicit-def $vgpr29_hi16, implicit-def $vgpr30, implicit-def $vgpr30_lo16, implicit-def $vgpr30_hi16, implicit-def $vgpr31, implicit-def $vgpr31_lo16, implicit-def $vgpr31_hi16, implicit-def $vgpr32, implicit-def $vgpr32_lo16, implicit-def $vgpr32_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1200-NEXT: S_CLAUSE 31 + ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit 
$exec + ; GFX1200-NEXT: $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec + ; GFX1200-NEXT: } + ; GFX1200-NEXT: BUNDLE implicit-def $vgpr33, implicit-def $vgpr33_lo16, implicit-def $vgpr33_hi16, implicit-def $vgpr34, implicit-def $vgpr34_lo16, implicit-def $vgpr34_hi16, implicit-def $vgpr35, implicit-def $vgpr35_lo16, implicit-def $vgpr35_hi16, implicit-def $vgpr36, implicit-def $vgpr36_lo16, implicit-def $vgpr36_hi16, implicit-def $vgpr37, implicit-def $vgpr37_lo16, implicit-def $vgpr37_hi16, implicit-def $vgpr38, implicit-def $vgpr38_lo16, implicit-def $vgpr38_hi16, implicit-def $vgpr39, implicit-def $vgpr39_lo16, implicit-def $vgpr39_hi16, implicit-def $vgpr40, implicit-def $vgpr40_lo16, implicit-def $vgpr40_hi16, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, 
implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1200-NEXT: S_CLAUSE 31 + ; GFX1200-NEXT: $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr52 = BUFFER_LOAD_DWORD_OFFEN 
$vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec + ; GFX1200-NEXT: } + ; GFX1200-NEXT: BUNDLE implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1200-NEXT: S_CLAUSE 15 + ; GFX1200-NEXT: $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, 
$sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec + ; GFX1200-NEXT: } + ; + ; GFX1250-LABEL: name: long_clause + ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit-def $vgpr5, implicit-def $vgpr5_lo16, implicit-def $vgpr5_hi16, implicit-def $vgpr6, implicit-def $vgpr6_lo16, implicit-def $vgpr6_hi16, implicit-def $vgpr7, implicit-def $vgpr7_lo16, implicit-def $vgpr7_hi16, implicit-def $vgpr8, implicit-def $vgpr8_lo16, implicit-def $vgpr8_hi16, implicit-def $vgpr9, implicit-def $vgpr9_lo16, implicit-def $vgpr9_hi16, implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr13, implicit-def $vgpr13_lo16, implicit-def $vgpr13_hi16, implicit-def $vgpr14, implicit-def $vgpr14_lo16, implicit-def $vgpr14_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit-def $vgpr17, implicit-def $vgpr17_lo16, implicit-def $vgpr17_hi16, implicit-def $vgpr18, implicit-def $vgpr18_lo16, implicit-def $vgpr18_hi16, implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit-def $vgpr21, implicit-def $vgpr21_lo16, implicit-def $vgpr21_hi16, implicit-def $vgpr22, implicit-def $vgpr22_lo16, implicit-def $vgpr22_hi16, implicit-def $vgpr23, implicit-def $vgpr23_lo16, implicit-def $vgpr23_hi16, implicit-def $vgpr24, implicit-def $vgpr24_lo16, implicit-def $vgpr24_hi16, implicit-def $vgpr25, implicit-def $vgpr25_lo16, implicit-def $vgpr25_hi16, implicit-def $vgpr26, implicit-def $vgpr26_lo16, implicit-def $vgpr26_hi16, implicit-def $vgpr27, implicit-def $vgpr27_lo16, implicit-def $vgpr27_hi16, implicit-def $vgpr28, implicit-def $vgpr28_lo16, implicit-def $vgpr28_hi16, implicit-def $vgpr29, implicit-def $vgpr29_lo16, implicit-def $vgpr29_hi16, implicit-def $vgpr30, implicit-def $vgpr30_lo16, implicit-def $vgpr30_hi16, implicit-def $vgpr31, implicit-def 
$vgpr31_lo16, implicit-def $vgpr31_hi16, implicit-def $vgpr32, implicit-def $vgpr32_lo16, implicit-def $vgpr32_hi16, implicit-def $vgpr33, implicit-def $vgpr33_lo16, implicit-def $vgpr33_hi16, implicit-def $vgpr34, implicit-def $vgpr34_lo16, implicit-def $vgpr34_hi16, implicit-def $vgpr35, implicit-def $vgpr35_lo16, implicit-def $vgpr35_hi16, implicit-def $vgpr36, implicit-def $vgpr36_lo16, implicit-def $vgpr36_hi16, implicit-def $vgpr37, implicit-def $vgpr37_lo16, implicit-def $vgpr37_hi16, implicit-def $vgpr38, implicit-def $vgpr38_lo16, implicit-def $vgpr38_hi16, implicit-def $vgpr39, implicit-def $vgpr39_lo16, implicit-def $vgpr39_hi16, implicit-def $vgpr40, implicit-def $vgpr40_lo16, implicit-def $vgpr40_hi16, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1250-NEXT: S_CLAUSE 62 + ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, 
$sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, 
$sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec + ; GFX1250-NEXT: } + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, 
implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1250-NEXT: S_CLAUSE 16 + ; GFX1250-NEXT: $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec + $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec + $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec + $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec + $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec + $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec + $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec + $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec + $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec + $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec + $vgpr11 = 
BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec + $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec + $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec + $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec + $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec + $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec + $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec + $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec + $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec + $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec + $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec + $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec + $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec + $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec + $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec + $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec + $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec + $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec + $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec + $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec + $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec + $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec + $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec + $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec + $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec + $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec + $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec + $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec + $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec + $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec + $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec + $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec + $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec + $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec + $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec + $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec + $vgpr47 = 
BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec + $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec + $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec + $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec + $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec + $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec + $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec + $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec + $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec + $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec + $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec + $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec + $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec + $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec + $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec + $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec + $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec + $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec + $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec + $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec + $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec + $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec + $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec + $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec + $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec + $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec + $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec + $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec + $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec + $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec + $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec + $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec + $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec + $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec +... 
+ +--- +name: kill +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1, $sgpr4 + ; GFX12-LABEL: name: kill + ; GFX12: liveins: $sgpr0_sgpr1, $sgpr4 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit $sgpr0_sgpr1, implicit undef $sgpr4 { + ; GFX12-NEXT: S_CLAUSE 1 + ; GFX12-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 + ; GFX12-NEXT: KILL undef renamable $sgpr4 + ; GFX12-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 + ; GFX12-NEXT: } + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 + KILL undef renamable $sgpr4 + $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 +... + +--- +name: kill2 +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1, $sgpr4, $sgpr5 + ; GFX12-LABEL: name: kill2 + ; GFX12: liveins: $sgpr0_sgpr1, $sgpr4, $sgpr5 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit $sgpr0_sgpr1, implicit undef $sgpr4 { + ; GFX12-NEXT: S_CLAUSE 1 + ; GFX12-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 + ; GFX12-NEXT: KILL undef renamable $sgpr4 + ; GFX12-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 + ; GFX12-NEXT: } + ; GFX12-NEXT: KILL undef renamable $sgpr5 + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 + KILL undef renamable $sgpr4 + $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 + KILL undef renamable $sgpr5 +... + +--- +name: flat_load_atomic +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-LABEL: name: flat_load_atomic + ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX1200-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr + ; + ; GFX1250-LABEL: name: flat_load_atomic + ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr, implicit $vgpr2 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX1250-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr + ; GFX1250-NEXT: } + $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr +... 
+ +--- +name: global_load_atomic +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-LABEL: name: global_load_atomic + ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec + ; + ; GFX1250-LABEL: name: global_load_atomic + ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr2 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec +... + +--- +name: flat_global_load +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; GFX12-LABEL: name: flat_global_load + ; GFX12: liveins: $vgpr0_vgpr1 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $flat_scr + $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $flat_scr +... + +--- +name: buffer_load_atomic +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1200-LABEL: name: buffer_load_atomic + ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec + ; + ; GFX1250-LABEL: name: buffer_load_atomic + ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr0, implicit-def $vgpr0_lo16, implicit-def $vgpr0_hi16, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $exec, implicit $vgpr0 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec +... 
+ +--- +name: flat_load_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-LABEL: name: flat_load_store + ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX1200-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr + ; + ; GFX1250-LABEL: name: flat_load_store + ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr, implicit $vgpr2 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX1250-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr + ; GFX1250-NEXT: } + $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr +... + +--- +name: global_load_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-LABEL: name: global_load_store + ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX1200-NEXT: GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec + ; + ; GFX1250-LABEL: name: global_load_store + ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr2 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX1250-NEXT: GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec +... + +--- +name: buffer_load_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1200-LABEL: name: buffer_load_store + ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + ; GFX1200-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec + ; + ; GFX1250-LABEL: name: buffer_load_store + ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $exec, implicit $vgpr0 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec +... 
+ +--- +name: flat_load_global_load +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX12-LABEL: name: flat_load_global_load + ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec +... + +--- +name: global_load_buffer_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4 + ; GFX12-LABEL: name: global_load_buffer_store + ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX12-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec + $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFSET $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec +... --- name: flat_prefetch_flat_load @@ -31,3 +532,106 @@ body: | GLOBAL_PREFETCH_B8 $vgpr0_vgpr1, 0, 0, implicit $exec $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr ... + +--- +name: async_load_async_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + ; GFX12-LABEL: name: async_load_async_store + ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: BUNDLE implicit-def $asynccnt, implicit $vgpr2, implicit $vgpr0_vgpr1, implicit $exec, implicit $asynccnt { + ; GFX12-NEXT: S_CLAUSE 1 + ; GFX12-NEXT: GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt + ; GFX12-NEXT: GLOBAL_STORE_ASYNC_FROM_LDS_B32 $vgpr0_vgpr1, $vgpr2, 32, 0, implicit-def $asynccnt, implicit $exec, implicit internal $asynccnt + ; GFX12-NEXT: } + GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt + GLOBAL_STORE_ASYNC_FROM_LDS_B32 $vgpr0_vgpr1, $vgpr2, 32, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt +... + +--- +name: async_load_ds_load_tr +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + ; GFX12-LABEL: name: async_load_ds_load_tr + ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt + ; GFX12-NEXT: $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr2, 8, 0, implicit $exec + GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt + $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr2, 8, 0, implicit $exec +... + +--- +name: ds_load_trs_ds_load +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + ; GFX12-LABEL: name: ds_load_trs_ds_load + ; GFX12: liveins: $vgpr0 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr4_vgpr5 = DS_LOAD_TR8_B64 $vgpr0, 0, 0, implicit $exec + ; GFX12-NEXT: $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr0, 8, 0, implicit $exec + ; GFX12-NEXT: $vgpr2_vgpr3 = DS_READ_B64_gfx9 $vgpr0, 16, 0, implicit $exec + $vgpr4_vgpr5 = DS_LOAD_TR8_B64 $vgpr0, 0, 0, implicit $exec + $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr0, 8, 0, implicit $exec + $vgpr2_vgpr3 = DS_READ_B64_gfx9 $vgpr0, 16, 0, implicit $exec +... 
+ +# Make sure we do not clause DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 with anything +--- +name: ds_atomic_async_barrier_arrive_b64_ds_read +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GFX12-LABEL: name: ds_atomic_async_barrier_arrive_b64_ds_read + ; GFX12: liveins: $vgpr0, $vgpr1 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec + ; GFX12-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec + ; GFX12-NEXT: $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec + $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec + DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec + $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec +... + +--- +name: ds_atomic_async_barrier_arrive_b64_flat_load +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GFX12-LABEL: name: ds_atomic_async_barrier_arrive_b64_flat_load + ; GFX12: liveins: $vgpr0, $vgpr1 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec + ; GFX12-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 16, 0, implicit $exec, implicit $flat_scr + $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec + $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 16, 0, implicit $exec, implicit $flat_scr +... + +--- +name: global_load_switching_scope +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; GFX12-LABEL: name: global_load_switching_scope + ; GFX12: liveins: $vgpr0_vgpr1 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: BUNDLE implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr { + ; GFX12-NEXT: S_CLAUSE 1 + ; GFX12-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 24, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: } + $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 24, implicit $exec, implicit $flat_scr +... 
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir index c7767cb8..b53bde6 100644 --- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir +++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir @@ -20,11 +20,32 @@ ret void } + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2() #0 { + ret void + } + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_src2_different_subreg() #0 { ret void } + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first() #1 { + ret void + } + + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second() #1 { + ret void + } + + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg() #1 { + ret void + } + + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg() #1 { + ret void + } + attributes #0 = { "amdgpu-wave-limiter"="true" "amdgpu-waves-per-eu"="8,8" } + attributes #1 = { "amdgpu-wave-limiter"="true" "amdgpu-waves-per-eu"="10,10" } ... # Inflate pattern, except the defining instruction isn't an MFMA. @@ -403,6 +424,89 @@ body: | ... +# Non-mac variant, src2 is a physical register +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2 +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2 + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; 
CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + # Non-mac variant, src2 is the same VGPR, but a different subregister. 
--- name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_src2_different_subreg @@ -489,3 +593,423 @@ body: | S_ENDPGM 0 ... + +# There isn't an assignable AGPR around the first MFMA. +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; 
CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2 + %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 or %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, 
implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# There isn't an assignable AGPR around the second MFMA. +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable 
$agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9 + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 or %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def 
$vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# There isn't an assignable AGPR around the first MFMA, with physreg interference +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit 
$mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + ; CHECK-NEXT: S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + ; CHECK-NEXT: S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + ; CHECK-NEXT: S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + ; CHECK-NEXT: S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + ; CHECK-NEXT: S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + ; CHECK-NEXT: S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead 
$scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 or %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... 
+ +# There isn't an assignable AGPR around the second MFMA, physreg interference +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + ; CHECK-NEXT: S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + ; CHECK-NEXT: S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + ; CHECK-NEXT: S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + ; CHECK-NEXT: S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + ; CHECK-NEXT: S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + ; CHECK-NEXT: S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + 
; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + S_NOP 0, implicit 
$agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 or %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir index b907c13..b59f2de 100644 --- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir +++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir @@ -445,6 +445,86 @@ body: | ... 
+ +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_two_chained_uses_cannot_rewrite_final_use +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_two_chained_uses_cannot_rewrite_final_use + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, killed $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* 
sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2 + S_ENDPGM 0 + +... + # There is a rewrite candidate, but it is used by another MFMA which # does not have a tied result. --- @@ -619,10 +699,9 @@ body: | S_ENDPGM 0 ... - -# There isn't an assignable AGPR around the first MFMA. +# Chain of 2 untied cases, but the use isn't in src2. 
--- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2 tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -630,7 +709,7 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2 ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} @@ -647,10 +726,8 @@ body: | ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) - ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 - ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr4_vgpr5, $vgpr8_vgpr9, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} @@ -685,10 +762,8 @@ body: | liveins: $vcc undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) - S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2 %3:vreg_512_align2 = 
V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9 - %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %3.sub4_sub5, %3.sub8_sub9, undef %6:vreg_512_align2, 0, 0, 0, implicit $mode, implicit $exec S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 @@ -711,9 +786,10 @@ body: | ... -# There isn't an assignable AGPR around the second MFMA. +# Chain of 2 untied cases, but the second mfma is a different size and +# uses a subregister. --- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -721,7 +797,7 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} @@ -739,18 +815,16 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 - ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = V_MFMA_F32_16X16X16F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc 
; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3 ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 - ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 @@ -758,10 +832,7 @@ body: | ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) ; CHECK-NEXT: S_ENDPGM 0 bb.0: S_NOP 0, implicit-def $agpr0 @@ -777,9 +848,7 @@ body: | undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2 - %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec - S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9 + %4:vreg_128_align2 = V_MFMA_F32_16X16X16F16_vgprcd_e64 %1, %1, %3.sub2_sub3_sub4_sub5, 0, 0, 0, implicit $mode, implicit $exec S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 @@ -794,6 +863,229 @@ body: | S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + S_ENDPGM 0 + +... 
+ +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_local_split +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_local_split + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X8F16_mac_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), 
addrspace 1) + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# Performs a split and inflate around the single instruction +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable 
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X8F16_mac_e64 killed $vgpr2_vgpr3, $vgpr2_vgpr3, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 
0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# Performs a split and inflate around the single instruction, non-tied case +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $agpr0_agpr1 + ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, 
$vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %0 + S_NOP 0, implicit-def 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) @@ -802,9 +1094,11 @@ body: | ... -# Chain of 2 untied cases, but the use isn't in src2. +# This case does not fully use %0 after the MFMA. As a result, +# SplitKits insert a copy bundle for the subset of used lanes instead +# of a simple copy. --- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2 +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split_partial_uses_only tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -812,7 +1106,447 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2 + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split_partial_uses_only + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $agpr0_agpr1 + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from 
%stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 killed $vgpr2_vgpr3, $vgpr2_vgpr3, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY renamable $vgpr0_vgpr1_vgpr2_vgpr3 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = COPY renamable $agpr0_agpr1_agpr2_agpr3 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %0 + S_NOP 0, implicit-def 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + S_ENDPGM 0 + +... + +# Untied version of previous. This case does not fully use %4 after +# the MFMA. As a result, SplitKit inserts a copy bundle for the subset +# of used lanes instead of a simple copy. +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split_partial_uses_only +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split_partial_uses_only + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $agpr0_agpr1 + ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 
killed $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY renamable $vgpr2_vgpr3_vgpr4_vgpr5 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = COPY renamable $agpr0_agpr1_agpr2_agpr3 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def 
$vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + S_ENDPGM 0 + +... + +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr10 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: renamable $vgpr11 = COPY renamable $vgpr10 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) + ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) + ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) + ; CHECK-NEXT: renamable $vgpr14_vgpr15_vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable 
$vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_1024_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + %0.sub0_sub1_sub2_sub3:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) + %0.sub4_sub5_sub6_sub7:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) + %0.sub8_sub9_sub10_sub11:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) + %0.sub12_sub13_sub14_sub15:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) + %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15:vreg_1024_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + 
S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +--- +name: chained_mfma_dst_user_is_vgpr +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: chained_mfma_dst_user_is_vgpr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 + ; CHECK-NEXT: S_NOP 0, implicit-def 
$vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + successors: %bb.1(0x80000000) + + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + successors: %bb.1(0x40000000), %bb.2(0x40000000) + liveins: $vcc + + undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec + early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %4 + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit 
$exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# TODO: In this trivial case, the single copy required is cheaper than +# the tuple copy. +--- +name: chained_mfma_dst_user_is_vgpr_small_subreg +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: chained_mfma_dst_user_is_vgpr_small_subreg + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: dead renamable $vgpr0 = nofpexcept V_CVT_F16_F32_e32 killed $vgpr0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR 
renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + successors: %bb.1(0x80000000) + + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + successors: %bb.1(0x40000000), %bb.2(0x40000000) + liveins: $vcc + + undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec + early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %5:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 %4.sub0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... 
+ +# Transitive user of the register is an MFMA with non-register src2 +--- +name: chained_mfma_dst_user_has_imm_src2 +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: chained_mfma_dst_user_has_imm_src2 ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} @@ -830,7 +1564,8 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr4_vgpr5, $vgpr8_vgpr9, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr20_vgpr21_vgpr22_vgpr23 = V_MFMA_F32_4X4X4F16_vgprcd_e64 $vgpr20_vgpr21, $vgpr18_vgpr19, 0, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} @@ -853,6 +1588,8 @@ body: | ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: S_ENDPGM 0 bb.0: + successors: %bb.1(0x80000000) + S_NOP 0, implicit-def $agpr0 renamable $sgpr0 = S_MOV_B32 0 undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec @@ -862,16 +1599,104 @@ body: | %0.sub9:vreg_512_align2 = COPY %0.sub8 bb.1: + successors: %bb.1(0x40000000), %bb.2(0x40000000) liveins: $vcc - undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) - %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %3.sub4_sub5, %3.sub8_sub9, undef %6:vreg_512_align2, 0, 0, 0, implicit $mode, implicit $exec + undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec + early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %4.sub0_sub1_sub2_sub3:vreg_512_align2 = V_MFMA_F32_4X4X4F16_vgprcd_e64 %4.sub0_sub1, %1, 0, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 bb.2: - ; No VGPRs available for %0 or %4 + S_NOP 0, 
implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + 
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 @@ -889,10 +1714,8 @@ body: | ... -# Chain of 2 untied cases, but the second mfma is a different size and -# uses a subregister. 
--- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user_with_vgpr_use tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -900,7 +1723,7 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user_with_vgpr_use ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} @@ -908,26 +1731,27 @@ body: | ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 - ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1 ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) - ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = V_MFMA_F32_16X16X16F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3 + ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable 
$vgpr0_vgpr1_vgpr2_vgpr3 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 @@ -935,7 +1759,10 @@ body: | ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: S_ENDPGM 0 bb.0: S_NOP 0, implicit-def $agpr0 @@ -949,14 +1776,15 @@ body: | bb.1: liveins: $vcc - undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) - %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - %4:vreg_128_align2 = V_MFMA_F32_16X16X16F16_vgprcd_e64 %1, %1, %3.sub2_sub3_sub4_sub5, 0, 0, 0, implicit $mode, implicit $exec + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %4 S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 bb.2: - ; No VGPRs available for %0 or %4 + ; No VGPRs available for %0 S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 @@ -966,13 +1794,16 @@ body: | S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec - GLOBAL_STORE_DWORDX4_SADDR %5, %4, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: 
(store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) S_ENDPGM 0 ... --- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_has_tied_user tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -980,32 +1811,115 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_has_tied_user ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 - ; CHECK-NEXT: renamable $vgpr10 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr2_vgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr2_vgpr3, $vgpr2_vgpr3, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr2_vgpr3, $vgpr2_vgpr3, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def 
$vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %4, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# Non-mac variant, src2 is an immediate. 
+--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_imm_src2 +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_imm_src2 + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc - ; CHECK-NEXT: renamable $vgpr11 = COPY renamable $vgpr10 + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: renamable $vgpr14_vgpr15_vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, 0, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 @@ -1025,20 +1939,16 @@ body: | bb.0: S_NOP 0, implicit-def $agpr0 renamable $sgpr0 = S_MOV_B32 0 - undef %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 0, implicit $exec + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec renamable $sgpr1 = COPY renamable $sgpr0 %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 renamable $vcc = S_AND_B64 $exec, -1, 
implicit-def dead $scc - %0.sub9:vreg_1024_align2 = COPY %0.sub8 + %0.sub9:vreg_512_align2 = COPY %0.sub8 bb.1: liveins: $vcc - %0.sub0_sub1_sub2_sub3:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) - %0.sub4_sub5_sub6_sub7:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) - %0.sub8_sub9_sub10_sub11:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) - %0.sub12_sub13_sub14_sub15:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) - %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15:vreg_1024_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, 0, 0, 0, implicit $mode, implicit $exec + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, 0, 0, 0, 0, implicit $mode, implicit $exec S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 diff --git a/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll b/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll new file mode 100644 index 0000000..7b356d2 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll @@ -0,0 +1,237 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s + +; Demonstrate that the conversion of bitmasks affecting the sign bit on integers to srcmods +; does not apply to canonicalizing instructions. 
+ +define double @v_uitofp_i32_to_f64_abs(i32 %arg0) nounwind { +; GCN-LABEL: v_uitofp_i32_to_f64_abs: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_uitofp_i32_to_f64_abs: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %arg0.abs = and i32 %arg0, u0x7fffffff + %cvt = uitofp i32 %arg0.abs to double + ret double %cvt +} + +define double @v_uitofp_i32_to_f64_neg(i32 %arg0) nounwind { +; GCN-LABEL: v_uitofp_i32_to_f64_neg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_and_b32_e32 v0, 0x80000000, v0 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_uitofp_i32_to_f64_neg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_and_b32_e32 v0, 0x80000000, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %arg0.neg = and i32 %arg0, u0x80000000 + %cvt = uitofp i32 %arg0.neg to double + ret double %cvt +} + +define double @s_uitofp_i32_to_f64_abs(i32 inreg %arg0) nounwind { +; GCN-LABEL: s_uitofp_i32_to_f64_abs: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_bitset0_b32 s16, 31 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], s16 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_uitofp_i32_to_f64_abs: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_bitset0_b32 s0, 31 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], s0 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %arg0.abs = and i32 %arg0, u0x7fffffff + %cvt = uitofp i32 %arg0.abs to double + ret double %cvt +} + +define double @s_uitofp_i32_to_f64_neg(i32 inreg %arg0) nounwind { +; GCN-LABEL: s_uitofp_i32_to_f64_neg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_and_b32 s4, s16, 0x80000000 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], s4 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_uitofp_i32_to_f64_neg: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_and_b32 s0, s0, 0x80000000 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], s0 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %arg0.neg = and i32 %arg0, u0x80000000 + %cvt = uitofp i32 %arg0.neg to double + ret double %cvt +} + +define half @v_uitofp_i16_to_f16_abs(i16 %arg0) nounwind { +; GFX7-LABEL: v_uitofp_i16_to_f16_abs: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX7-NEXT: v_cvt_f32_u32_e32 v0, v0 +; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_uitofp_i16_to_f16_abs: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX9-NEXT: v_cvt_f16_u16_e32 v0, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: v_uitofp_i16_to_f16_abs: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0x7fff, v0.l +; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, v0.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: v_uitofp_i16_to_f16_abs: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cvt_f16_u16_e32 v0, v0 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %arg0.abs = and i16 %arg0, u0x7fff + %cvt = uitofp i16 %arg0.abs to half + ret half %cvt +} + +define half @v_uitofp_i16_to_f16_neg(i16 %arg0) nounwind { +; GFX7-LABEL: v_uitofp_i16_to_f16_neg: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v0, 0x8000, v0 +; GFX7-NEXT: v_cvt_f32_u32_e32 v0, v0 +; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_uitofp_i16_to_f16_neg: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff8000, v0 +; GFX9-NEXT: v_cvt_f16_u16_e32 v0, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: v_uitofp_i16_to_f16_neg: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_and_b16 v0.l, 0x8000, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, v0.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: v_uitofp_i16_to_f16_neg: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff8000, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cvt_f16_u16_e32 v0, v0 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %arg0.neg = and i16 %arg0, u0x8000 + %cvt = uitofp i16 %arg0.neg to half + ret half %cvt +} + +define half @s_uitofp_i16_to_f16_abs(i16 inreg %arg0) nounwind { +; GFX7-LABEL: s_uitofp_i16_to_f16_abs: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_and_b32 s4, s16, 0x7fff +; GFX7-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_uitofp_i16_to_f16_abs: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_and_b32 s4, s16, 0x7fff +; GFX9-NEXT: v_cvt_f16_u16_e32 v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: s_uitofp_i16_to_f16_abs: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, s0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: s_uitofp_i16_to_f16_abs: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: v_cvt_f16_u16_e32 v0, s0 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %arg0.abs = and i16 %arg0, u0x7fff + %cvt = uitofp i16 %arg0.abs to half + ret half %cvt +} + +define half @s_uitofp_i16_to_f16_neg(i16 inreg %arg0) nounwind { +; GFX7-LABEL: s_uitofp_i16_to_f16_neg: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_and_b32 s4, s16, 0x8000 +; GFX7-NEXT: 
v_cvt_f32_u32_e32 v0, s4 +; GFX7-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v0 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_uitofp_i16_to_f16_neg: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_and_b32 s4, s16, 0x8000 +; GFX9-NEXT: v_cvt_f16_u16_e32 v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: s_uitofp_i16_to_f16_neg: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_and_b32 s0, s0, 0x8000 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, s0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: s_uitofp_i16_to_f16_neg: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_and_b32 s0, s0, 0x8000 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: v_cvt_f16_u16_e32 v0, s0 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %arg0.neg = and i16 %arg0, u0x8000 + %cvt = uitofp i16 %arg0.neg to half + ret half %cvt +} + diff --git a/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll b/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll new file mode 100644 index 0000000..b3c7ac8 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll @@ -0,0 +1,1011 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s + +define i32 @fneg_select_i32_1(i32 %cond, i32 %a, i32 %b) { +; GCN-LABEL: fneg_select_i32_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, v2, -v1, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_select_i32_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, -v1, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i32 %a, u0x80000000 + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %neg.a, i32 %b + ret i32 %select +} + +define i32 @fneg_select_i32_2(i32 %cond, i32 %a, i32 %b) { +; GCN-LABEL: fneg_select_i32_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, -v1, v2, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_select_i32_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, -v1, v2, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i32 %a, u0x80000000 + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %b, i32 %neg.a + ret i32 %select +} + +define i32 @fneg_select_i32_both(i32 %cond, i32 %a, i32 %b) { +; GCN-LABEL: fneg_select_i32_both: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: 
v_cndmask_b32_e64 v0, -v2, -v1, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_select_i32_both: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, -v1, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i32 %a, u0x80000000 + %neg.b = xor i32 %b, u0x80000000 + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %neg.a, i32 %neg.b + ret i32 %select +} + +define i32 @fneg_1_fabs_2_select_i32(i32 %cond, i32 %a, i32 %b) { +; GCN-LABEL: fneg_1_fabs_2_select_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, |v1|, -v1, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_1_fabs_2_select_i32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, |v1|, -v1, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i32 %a, u0x80000000 + %abs.b = and i32 %a, u0x7fffffff + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %neg.a, i32 %abs.b + ret i32 %select +} + +define i32 @s_fneg_select_i32_1(i32 inreg %cond, i32 inreg %a, i32 inreg %b) { +; GCN-LABEL: s_fneg_select_i32_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_xor_b32 s4, s17, 0x80000000 +; GCN-NEXT: s_cmp_eq_u32 s16, 0 +; GCN-NEXT: s_cselect_b32 s4, s4, s18 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_select_i32_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_xor_b32 s1, s1, 0x80000000 +; GFX11-NEXT: s_cmp_eq_u32 s0, 0 +; GFX11-NEXT: s_cselect_b32 s0, s1, s2 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_mov_b32_e32 v0, s0 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i32 %a, u0x80000000 + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %neg.a, i32 %b + ret i32 %select +} + +define i32 @s_fneg_1_fabs_2_select_i32(i32 inreg %cond, i32 %a, i32 %b) { +; GCN-LABEL: s_fneg_1_fabs_2_select_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_cmp_eq_u32 s16, 0 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: v_cndmask_b32_e64 v0, |v0|, -v0, s[4:5] +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_1_fabs_2_select_i32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_cmp_eq_u32 s0, 0 +; GFX11-NEXT: s_cselect_b32 s0, -1, 0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_cndmask_b32_e64 v0, |v0|, -v0, s0 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i32 %a, u0x80000000 + %abs.b = and i32 %a, u0x7fffffff + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %neg.a, i32 %abs.b + ret i32 %select +} + +define <2 x i32> @fneg_select_v2i32_1(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) { +; GCN-LABEL: fneg_select_v2i32_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, v4, -v2, vcc +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_select_v2i32_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, -v2, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000) + %cmp = icmp eq <2 x i32> %cond, zeroinitializer + %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b + ret <2 x i32> %select +} + +define <2 x i32> @fneg_select_v2i32_2(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) { +; GCN-LABEL: fneg_select_v2i32_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, v4, vcc +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_select_v2i32_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, v4, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000) + %cmp = icmp eq <2 x i32> %cond, zeroinitializer + %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a + ret <2 x i32> %select +} + +define i32 @fabs_select_i32_1(i32 %cond, i32 %a, i32 %b) { +; GCN-LABEL: fabs_select_i32_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, v2, |v1|, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fabs_select_i32_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, |v1|, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = and i32 %a, u0x7fffffff + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %neg.a, i32 %b + ret i32 %select +} + +define i32 @fabs_select_i32_2(i32 %cond, i32 %a, i32 %b) { +; GCN-LABEL: fabs_select_i32_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, |v1|, v2, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fabs_select_i32_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, |v1|, v2, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = and i32 %a, u0x7fffffff + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %b, i32 %neg.a + ret i32 %select +} + +define <2 x i32> @fneg_1_fabs_2_select_v2i32(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) { +; GCN-LABEL: fneg_1_fabs_2_select_v2i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, |v2|, vcc +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, |v3|, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_1_fabs_2_select_v2i32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, |v2|, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, |v3|, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000) + %abs.b = and <2 x 
i32> %a, splat (i32 u0x7fffffff) + %cmp = icmp eq <2 x i32> %cond, zeroinitializer + %select = select <2 x i1> %cmp, <2 x i32> %abs.b, <2 x i32> %neg.a + ret <2 x i32> %select +} + +define i32 @fneg_fabs_select_i32_1(i32 %cond, i32 %a, i32 %b) { +; GCN-LABEL: fneg_fabs_select_i32_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, v2, -|v1|, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_fabs_select_i32_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, -|v1|, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or i32 %a, u0x80000000 + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %neg.a, i32 %b + ret i32 %select +} + +define i32 @fneg_fabs_select_i32_2(i32 %cond, i32 %a, i32 %b) { +; GCN-LABEL: fneg_fabs_select_i32_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, -|v1|, v2, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_fabs_select_i32_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, -|v1|, v2, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or i32 %a, u0x80000000 + %cmp = icmp eq i32 %cond, zeroinitializer + %select = select i1 %cmp, i32 %b, i32 %neg.a + ret i32 %select +} + +define <2 x i32> @fneg_fabs_select_v2i32_1(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) { +; GCN-LABEL: fneg_fabs_select_v2i32_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, v4, -|v2|, vcc +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_fabs_select_v2i32_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, -|v2|, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or <2 x i32> %a, splat (i32 u0x80000000) + %cmp = icmp eq <2 x i32> %cond, zeroinitializer + %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b + ret <2 x i32> %select +} + +define <2 x i32> @fneg_fabs_select_v2i32_2(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) { +; GCN-LABEL: fneg_fabs_select_v2i32_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, -|v2|, v4, vcc +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GCN-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_fabs_select_v2i32_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, -|v2|, v4, vcc_lo +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or <2 x i32> %a, splat (i32 u0x80000000) + %cmp = icmp eq <2 x i32> %cond, zeroinitializer + %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a + ret <2 x i32> %select +} + + +define <2 x i32> 
@s_fneg_select_v2i32_1(<2 x i32> inreg %cond, <2 x i32> inreg %a, <2 x i32> inreg %b) { +; GCN-LABEL: s_fneg_select_v2i32_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_xor_b32 s4, s19, 0x80000000 +; GCN-NEXT: s_xor_b32 s5, s18, 0x80000000 +; GCN-NEXT: s_cmp_eq_u32 s16, 0 +; GCN-NEXT: s_cselect_b32 s5, s5, s20 +; GCN-NEXT: s_cmp_eq_u32 s17, 0 +; GCN-NEXT: s_cselect_b32 s4, s4, s21 +; GCN-NEXT: v_mov_b32_e32 v0, s5 +; GCN-NEXT: v_mov_b32_e32 v1, s4 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_select_v2i32_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000 +; GFX11-NEXT: s_xor_b32 s2, s2, 0x80000000 +; GFX11-NEXT: s_cmp_eq_u32 s0, 0 +; GFX11-NEXT: s_cselect_b32 s0, s2, s16 +; GFX11-NEXT: s_cmp_eq_u32 s1, 0 +; GFX11-NEXT: s_cselect_b32 s1, s3, s17 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000) + %cmp = icmp eq <2 x i32> %cond, zeroinitializer + %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b + ret <2 x i32> %select +} + +define <2 x i32> @s_fneg_fabs_select_v2i32_2(<2 x i32> inreg %cond, <2 x i32> inreg %a, <2 x i32> inreg %b) { +; GCN-LABEL: s_fneg_fabs_select_v2i32_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_bitset1_b32 s19, 31 +; GCN-NEXT: s_bitset1_b32 s18, 31 +; GCN-NEXT: s_cmp_eq_u32 s16, 0 +; GCN-NEXT: s_cselect_b32 s4, s20, s18 +; GCN-NEXT: s_cmp_eq_u32 s17, 0 +; GCN-NEXT: s_cselect_b32 s5, s21, s19 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: v_mov_b32_e32 v1, s5 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_fabs_select_v2i32_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_bitset1_b32 s3, 31 +; GFX11-NEXT: s_bitset1_b32 s2, 31 +; GFX11-NEXT: s_cmp_eq_u32 s0, 0 +; GFX11-NEXT: s_cselect_b32 s0, s16, s2 +; GFX11-NEXT: s_cmp_eq_u32 s1, 0 +; GFX11-NEXT: s_cselect_b32 s1, s17, s3 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or <2 x i32> %a, splat (i32 u0x80000000) + %cmp = icmp eq <2 x i32> %cond, zeroinitializer + %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a + ret <2 x i32> %select +} + +define i64 @fneg_select_i64_1(i64 %cond, i64 %a, i64 %b) { +; GCN-LABEL: fneg_select_i64_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] +; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_select_i64_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] +; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i64 %a, u0x8000000000000000 + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %neg.a, i64 %b + ret i64 %select +} + +define i64 @fneg_select_i64_2(i64 %cond, i64 %a, i64 %b) { +; GCN-LABEL: fneg_select_i64_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] +; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc +; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc +; 
GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_select_i64_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] +; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i64 %a, u0x8000000000000000 + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %b, i64 %neg.a + ret i64 %select +} + +define i64 @fneg_1_fabs_2_select_i64(i64 %cond, i64 %a, i64 %b) { +; GCN-LABEL: fneg_1_fabs_2_select_i64: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] +; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; GCN-NEXT: v_cndmask_b32_e64 v1, |v5|, -v3, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_1_fabs_2_select_i64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] +; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, |v5|, -v3, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i64 %a, u0x8000000000000000 + %abs.b = and i64 %b, u0x7fffffffffffffff + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %neg.a, i64 %abs.b + ret i64 %select +} + +define i64 @fabs_select_i64_1(i64 %cond, i64 %a, i64 %b) { +; GCN-LABEL: fabs_select_i64_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] +; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; GCN-NEXT: v_cndmask_b32_e64 v1, v5, |v3|, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fabs_select_i64_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] +; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, |v3|, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = and i64 %a, u0x7fffffffffffffff + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %neg.a, i64 %b + ret i64 %select +} + +define i64 @fabs_select_i64_2(i64 %cond, i64 %a, i64 %b) { +; GCN-LABEL: fabs_select_i64_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] +; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc +; GCN-NEXT: v_cndmask_b32_e64 v1, |v3|, v5, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fabs_select_i64_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] +; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, |v3|, v5, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = and i64 %a, u0x7fffffffffffffff + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %b, i64 %neg.a + ret i64 %select +} + +define i64 @fneg_fabs_select_i64_1(i64 %cond, i64 %a, i64 %b) { +; GCN-LABEL: fneg_fabs_select_i64_1: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] +; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_fabs_select_i64_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] +; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo +; GFX11-NEXT: 
v_cndmask_b32_e64 v1, v5, -|v3|, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or i64 %a, u0x8000000000000000 + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %neg.a, i64 %b + ret i64 %select +} + +define i64 @fneg_fabs_select_i64_2(i64 %cond, i64 %a, i64 %b) { +; GCN-LABEL: fneg_fabs_select_i64_2: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] +; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc +; GCN-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: fneg_fabs_select_i64_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] +; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc_lo +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or i64 %a, u0x8000000000000000 + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %b, i64 %neg.a + ret i64 %select +} + +define i64 @s_fneg_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_fneg_select_i64_1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0 +; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000 +; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX7-NEXT: s_cselect_b32 s4, s18, s20 +; GFX7-NEXT: s_cselect_b32 s5, s6, s21 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_fneg_select_i64_1: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000 +; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0 +; GFX9-NEXT: s_cselect_b32 s5, s18, s20 +; GFX9-NEXT: s_cselect_b32 s4, s4, s21 +; GFX9-NEXT: v_mov_b32_e32 v0, s5 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_select_i64_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000 +; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0 +; GFX11-NEXT: s_cselect_b32 s0, s2, s16 +; GFX11-NEXT: s_cselect_b32 s1, s3, s17 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i64 %a, u0x8000000000000000 + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %neg.a, i64 %b + ret i64 %select +} + +define i64 @s_fneg_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_fneg_select_i64_2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0 +; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000 +; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX7-NEXT: s_cselect_b32 s4, s20, s18 +; GFX7-NEXT: s_cselect_b32 s5, s21, s6 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_fneg_select_i64_2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000 +; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0 +; GFX9-NEXT: s_cselect_b32 s5, s20, s18 +; GFX9-NEXT: s_cselect_b32 s4, s21, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s5 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_select_i64_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: 
s_xor_b32 s3, s3, 0x80000000 +; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0 +; GFX11-NEXT: s_cselect_b32 s0, s16, s2 +; GFX11-NEXT: s_cselect_b32 s1, s17, s3 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i64 %a, u0x8000000000000000 + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %b, i64 %neg.a + ret i64 %select +} + +define i64 @s_fneg_1_fabs_2_select_i64(i64 inreg %cond, i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_fneg_1_fabs_2_select_i64: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0 +; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000 +; GFX7-NEXT: s_bitset0_b32 s21, 31 +; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX7-NEXT: s_cselect_b32 s4, s18, s20 +; GFX7-NEXT: s_cselect_b32 s5, s6, s21 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_fneg_1_fabs_2_select_i64: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000 +; GFX9-NEXT: s_bitset0_b32 s21, 31 +; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0 +; GFX9-NEXT: s_cselect_b32 s5, s18, s20 +; GFX9-NEXT: s_cselect_b32 s4, s4, s21 +; GFX9-NEXT: v_mov_b32_e32 v0, s5 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_1_fabs_2_select_i64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000 +; GFX11-NEXT: s_bitset0_b32 s17, 31 +; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0 +; GFX11-NEXT: s_cselect_b32 s0, s2, s16 +; GFX11-NEXT: s_cselect_b32 s1, s3, s17 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i64 %a, u0x8000000000000000 + %abs.b = and i64 %b, u0x7fffffffffffffff + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %neg.a, i64 %abs.b + ret i64 %select +} + +define i64 @s_fabs_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_fabs_select_i64_1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0 +; GFX7-NEXT: s_bitset0_b32 s19, 31 +; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX7-NEXT: s_cselect_b32 s4, s18, s20 +; GFX7-NEXT: s_cselect_b32 s5, s19, s21 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_fabs_select_i64_1: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_bitset0_b32 s19, 31 +; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0 +; GFX9-NEXT: s_cselect_b32 s4, s18, s20 +; GFX9-NEXT: s_cselect_b32 s5, s19, s21 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fabs_select_i64_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_bitset0_b32 s3, 31 +; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0 +; GFX11-NEXT: s_cselect_b32 s0, s2, s16 +; GFX11-NEXT: s_cselect_b32 s1, s3, s17 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = and i64 %a, u0x7fffffffffffffff + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %neg.a, i64 %b + ret i64 
%select +} + +define i64 @s_fabs_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_fabs_select_i64_2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0 +; GFX7-NEXT: s_bitset0_b32 s19, 31 +; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX7-NEXT: s_cselect_b32 s4, s20, s18 +; GFX7-NEXT: s_cselect_b32 s5, s21, s19 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_fabs_select_i64_2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_bitset0_b32 s19, 31 +; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0 +; GFX9-NEXT: s_cselect_b32 s4, s20, s18 +; GFX9-NEXT: s_cselect_b32 s5, s21, s19 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fabs_select_i64_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_bitset0_b32 s3, 31 +; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0 +; GFX11-NEXT: s_cselect_b32 s0, s16, s2 +; GFX11-NEXT: s_cselect_b32 s1, s17, s3 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = and i64 %a, u0x7fffffffffffffff + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %b, i64 %neg.a + ret i64 %select +} + +define i64 @s_fneg_fabs_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_fneg_fabs_select_i64_1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0 +; GFX7-NEXT: s_bitset1_b32 s19, 31 +; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX7-NEXT: s_cselect_b32 s4, s18, s20 +; GFX7-NEXT: s_cselect_b32 s5, s19, s21 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_fneg_fabs_select_i64_1: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_bitset1_b32 s19, 31 +; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0 +; GFX9-NEXT: s_cselect_b32 s4, s18, s20 +; GFX9-NEXT: s_cselect_b32 s5, s19, s21 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_fabs_select_i64_1: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_bitset1_b32 s3, 31 +; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0 +; GFX11-NEXT: s_cselect_b32 s0, s2, s16 +; GFX11-NEXT: s_cselect_b32 s1, s3, s17 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or i64 %a, u0x8000000000000000 + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %neg.a, i64 %b + ret i64 %select +} + +define i64 @s_fneg_fabs_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_fneg_fabs_select_i64_2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0 +; GFX7-NEXT: s_bitset1_b32 s19, 31 +; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX7-NEXT: s_cselect_b32 s4, s20, s18 +; GFX7-NEXT: s_cselect_b32 s5, s21, s19 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 +; GFX7-NEXT: v_mov_b32_e32 v1, s5 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_fneg_fabs_select_i64_2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_bitset1_b32 s19, 31 +; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0 +; GFX9-NEXT: s_cselect_b32 s4, s20, s18 +; GFX9-NEXT: s_cselect_b32 s5, s21, s19 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_fneg_fabs_select_i64_2: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_bitset1_b32 s3, 31 +; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0 +; GFX11-NEXT: s_cselect_b32 s0, s16, s2 +; GFX11-NEXT: s_cselect_b32 s1, s17, s3 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %neg.a = or i64 %a, u0x8000000000000000 + %cmp = icmp eq i64 %cond, zeroinitializer + %select = select i1 %cmp, i64 %b, i64 %neg.a + ret i64 %select +} + +define i16 @fneg_select_i16_1(i16 %cond, i16 %a, i16 %b) { +; GFX7-LABEL: fneg_select_i16_1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX7-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: fneg_select_i16_1: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1 +; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: fneg_select_i16_1: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l +; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: fneg_select_i16_1: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1 +; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i16 %a, u0x8000 + %cmp = icmp eq i16 %cond, zeroinitializer + %select = select i1 %cmp, i16 %neg.a, i16 %b + ret i16 %select +} + +define i16 @fneg_select_i16_2(i16 %cond, i16 %a, i16 %b) { +; GFX7-LABEL: fneg_select_i16_2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX7-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: fneg_select_i16_2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1 +; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: fneg_select_i16_2: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l +; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v0.h, v2.l, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: 
fneg_select_i16_2: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1 +; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i16 %a, u0x8000 + %cmp = icmp eq i16 %cond, zeroinitializer + %select = select i1 %cmp, i16 %b, i16 %neg.a + ret i16 %select +} + +define i16 @fneg_select_i16_both(i16 %cond, i16 %a, i16 %b) { +; GFX7-LABEL: fneg_select_i16_both: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX7-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: fneg_select_i16_both: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; GFX9-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: fneg_select_i16_both: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v1.l, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_xor_b16 v0.l, 0x8000, v0.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: fneg_select_i16_both: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i16 %a, u0x8000 + %neg.b = xor i16 %b, u0x8000 + %cmp = icmp eq i16 %cond, zeroinitializer + %select = select i1 %cmp, i16 %neg.a, i16 %neg.b + ret i16 %select +} + +define i16 @fneg_1_fabs_2_select_i16(i16 %cond, i16 %a, i16 %b) { +; GFX7-LABEL: fneg_1_fabs_2_select_i16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX7-NEXT: v_xor_b32_e32 v2, 0xffff8000, v1 +; GFX7-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: fneg_1_fabs_2_select_i16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_xor_b32_e32 v2, 0xffff8000, v1 +; GFX9-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: fneg_1_fabs_2_select_i16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0x7fff, v1.l +; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: fneg_1_fabs_2_select_i16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: 
v_xor_b32_e32 v2, 0xffff8000, v1 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %neg.a = xor i16 %a, u0x8000 + %abs.b = and i16 %a, u0x7fff + %cmp = icmp eq i16 %cond, zeroinitializer + %select = select i1 %cmp, i16 %neg.a, i16 %abs.b + ret i16 %select +} diff --git a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll index 1c298014..3001248 100644 --- a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll +++ b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll @@ -6,16 +6,24 @@ define amdgpu_gfx [13 x i32] @issue130120() { ; CHECK: ; %bb.0: ; %bb ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; CHECK-NEXT: v_mov_b32_e32 v0, 0 -; CHECK-NEXT: s_add_i32 s0, s32, 0xf0 -; CHECK-NEXT: s_add_i32 s1, s32, 0xf4 -; CHECK-NEXT: s_add_i32 s2, s32, 0xf8 -; CHECK-NEXT: s_add_i32 s3, s32, 0xfc +; CHECK-NEXT: s_movk_i32 s1, 0xf4 +; CHECK-NEXT: s_movk_i32 s2, 0xf8 +; CHECK-NEXT: s_movk_i32 s3, 0xfc +; CHECK-NEXT: s_movk_i32 s34, 0x100 ; CHECK-NEXT: v_mov_b32_e32 v1, v0 -; CHECK-NEXT: s_add_i32 s34, s32, 0x100 -; CHECK-NEXT: s_add_i32 s35, s32, 0x104 -; CHECK-NEXT: s_add_i32 s36, s32, 0x108 -; CHECK-NEXT: s_add_i32 s37, s32, 0x110 -; CHECK-NEXT: s_add_i32 s38, s32, 0x120 +; CHECK-NEXT: s_movk_i32 s35, 0x104 +; CHECK-NEXT: s_movk_i32 s36, 0x108 +; CHECK-NEXT: s_movk_i32 s37, 0x110 +; CHECK-NEXT: s_movk_i32 s38, 0x120 +; CHECK-NEXT: s_add_i32 s0, s32, 0xf0 +; CHECK-NEXT: s_add_i32 s1, s32, s1 +; CHECK-NEXT: s_add_i32 s2, s32, s2 +; CHECK-NEXT: s_add_i32 s3, s32, s3 +; CHECK-NEXT: s_add_i32 s34, s32, s34 +; CHECK-NEXT: s_add_i32 s35, s32, s35 +; CHECK-NEXT: s_add_i32 s36, s32, s36 +; CHECK-NEXT: s_add_i32 s37, s32, s37 +; CHECK-NEXT: s_add_i32 s38, s32, s38 ; CHECK-NEXT: s_or_b32 s39, s32, 4 ; CHECK-NEXT: s_or_b32 s40, s32, 8 ; CHECK-NEXT: s_or_b32 s41, s32, 12 diff --git a/llvm/test/CodeGen/AMDGPU/literal64.ll b/llvm/test/CodeGen/AMDGPU/literal64.ll index 768c972..98691d3 100644 --- a/llvm/test/CodeGen/AMDGPU/literal64.ll +++ b/llvm/test/CodeGen/AMDGPU/literal64.ll @@ -67,24 +67,8 @@ define void @v_mov_b64_double(ptr addrspace(1) %ptr) { ; GCN: ; %bb.0: ; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: global_load_b64 v[4:5], v[0:1], off -; GCN-NEXT: s_mov_b32 s0, 0 -; GCN-NEXT: .LBB6_1: ; %atomicrmw.start -; GCN-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-NEXT: s_wait_loadcnt 0x0 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_add_f64_e32 v[2:3], lit64(0x4063233333333333), v[4:5] -; GCN-NEXT: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GCN-NEXT: s_wait_loadcnt 0x0 -; GCN-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5] -; GCN-NEXT: s_wait_xcnt 0x0 -; GCN-NEXT: v_mov_b64_e32 v[4:5], v[2:3] -; GCN-NEXT: s_or_b32 s0, vcc_lo, s0 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GCN-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GCN-NEXT: s_cbranch_execnz .LBB6_1 -; GCN-NEXT: ; %bb.2: ; %atomicrmw.end -; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333) +; GCN-NEXT: global_atomic_add_f64 v[0:1], v[2:3], off scope:SCOPE_SYS ; GCN-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fadd ptr addrspace(1) %ptr, double 153.1 monotonic ret void diff --git 
a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll index 462090c..0a2e7af 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll @@ -1,12 +1,46 @@ -; RUN: llc -mtriple=amdgcn -mcpu=verde < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX12 %s -; GCN-LABEL: {{^}}gs_const: -; GCN-NOT: v_cmpx -; GCN: s_mov_b64 exec, 0 define amdgpu_gs void @gs_const() { +; SI-LABEL: gs_const: +; SI: ; %bb.0: +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], exec +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: gs_const: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], exec +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: gs_const: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], exec +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %tmp = icmp ule i32 0, 3 %tmp1 = select i1 %tmp, float 1.000000e+00, float -1.000000e+00 %c1 = fcmp oge float %tmp1, 0.0 @@ -19,12 +53,81 @@ define amdgpu_gs void @gs_const() { ret void } -; GCN-LABEL: {{^}}vcc_implicit_def: -; GCN: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}} -; GCN: v_cmp_gt_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0, v{{[0-9]+}} -; GCN: s_and{{n2|_not1}}_b64 exec, exec, vcc -; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1.0, [[CMP]] define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) { +; SI-LABEL: vcc_implicit_def: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1 +; SI-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB1_2 +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; SI-NEXT: exp mrt1 v0, v0, v0, v0 done vm +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB1_2: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: vcc_implicit_def: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1 
+; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: exp mrt1 v0, v0, v0, v0 done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB1_2: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: vcc_implicit_def: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1 +; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1] +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: exp mrt1 v0, v0, v0, v0 done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB1_2: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: vcc_implicit_def: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_le_f32_e64 s[0:1], 0, v1 +; GFX12-NEXT: s_mov_b64 s[2:3], exec +; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB1_2 +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, vcc +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: export mrt1 v0, v0, v0, v0 done +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB1_2: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %tmp0 = fcmp olt float %arg13, 0.000000e+00 %c1 = fcmp oge float %arg14, 0.0 call void @llvm.amdgcn.kill(i1 %c1) @@ -34,31 +137,102 @@ define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) { ret void } -; GCN-LABEL: {{^}}true: -; GCN-NEXT: %bb. 
-; GCN-NEXT: s_endpgm define amdgpu_gs void @true() { +; GCN-LABEL: true: +; GCN: ; %bb.0: +; GCN-NEXT: s_endpgm call void @llvm.amdgcn.kill(i1 true) ret void } -; GCN-LABEL: {{^}}false: -; GCN-NOT: v_cmpx -; GCN: s_mov_b64 exec, 0 define amdgpu_gs void @false() { +; SI-LABEL: false: +; SI: ; %bb.0: +; SI-NEXT: s_andn2_b64 exec, exec, exec +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: false: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_andn2_b64 exec, exec, exec +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: false: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: s_and_not1_b64 exec, exec, exec +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm call void @llvm.amdgcn.kill(i1 false) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}and: -; GCN: v_cmp_lt_i32 -; GCN: v_cmp_lt_i32 -; GCN: s_or_b64 s[0:1] -; GCN: s_and{{n2|_not1}}_b64 s[0:1], exec, s[0:1] -; GCN: s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1] -; GCN: s_and_b64 exec, exec, s[2:3] define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) { +; SI-LABEL: and: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; SI-NEXT: s_and_b64 exec, exec, s[2:3] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: and: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; GFX10-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: and: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX11-12-NEXT: s_mov_b64 s[2:3], exec +; GFX11-12-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = icmp slt i32 %a, %b %c2 = icmp slt i32 %c, %d %x = or i1 %c1, %c2 @@ -67,13 +241,52 @@ define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) { ret void } -; GCN-LABEL: {{^}}andn2: -; GCN: v_cmp_lt_i32 -; GCN: v_cmp_lt_i32 -; GCN: s_xor_b64 s[0:1] -; GCN: 
s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1] -; GCN: s_and_b64 exec, exec, s[2:3] define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) { +; SI-LABEL: andn2: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; SI-NEXT: s_and_b64 exec, exec, s[2:3] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: andn2: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1] +; GFX10-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: andn2: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1 +; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3 +; GFX11-12-NEXT: s_mov_b64 s[2:3], exec +; GFX11-12-NEXT: s_xor_b64 s[0:1], vcc, s[0:1] +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = icmp slt i32 %a, %b %c2 = icmp slt i32 %c, %d %x = xor i1 %c1, %c2 @@ -83,135 +296,854 @@ define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) { ret void } -; GCN-LABEL: {{^}}oeq: -; GCN: v_cmp_neq_f32 +; Should use v_cmp_neq_f32 define amdgpu_gs void @oeq(float %a) { +; SI-LABEL: oeq: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: oeq: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: oeq: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: oeq: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp oeq float %a, 0.0 call void 
@llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ogt: -; GCN: v_cmp_nlt_f32 +; Should use v_cmp_nlt_f32 define amdgpu_gs void @ogt(float %a) { +; SI-LABEL: ogt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ogt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ogt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ogt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ogt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}oge: -; GCN: v_cmp_nle_f32 +; Should use v_cmp_nle_f32 define amdgpu_gs void @oge(float %a) { +; SI-LABEL: oge: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: oge: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: oge: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: oge: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp oge float %a, 0.0 call void 
@llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}olt: -; GCN: v_cmp_ngt_f32 +; Should use v_cmp_ngt_f32 define amdgpu_gs void @olt(float %a) { +; SI-LABEL: olt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: olt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: olt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: olt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp olt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ole: -; GCN: v_cmp_nge_f32 +; Should use v_cmp_nge_f32 define amdgpu_gs void @ole(float %a) { +; SI-LABEL: ole: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ole: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ole: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ole: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ole float %a, 0.0 call void 
@llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}one: -; GCN: v_cmp_nlg_f32 +; Should use v_cmp_nlg_f32 define amdgpu_gs void @one(float %a) { +; SI-LABEL: one: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: one: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: one: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: one: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp one float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ord: -; GCN: v_cmp_o_f32 +; Should use v_cmp_o_f32 define amdgpu_gs void @ord(float %a) { +; SI-LABEL: ord: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ord: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: ord: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_o_f32_e32 vcc, v0, v0 +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = fcmp ord float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}uno: -; GCN: v_cmp_u_f32 +; Should use v_cmp_u_f32 define 
amdgpu_gs void @uno(float %a) { +; SI-LABEL: uno: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: uno: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-12-LABEL: uno: +; GFX11-12: ; %bb.0: +; GFX11-12-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX11-12-NEXT: s_mov_b64 s[0:1], exec +; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-12-NEXT: s_mov_b32 m0, 0 +; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-12-NEXT: s_endpgm +; GFX11-12-NEXT: ; %bb.1: +; GFX11-12-NEXT: s_mov_b64 exec, 0 +; GFX11-12-NEXT: s_endpgm %c1 = fcmp uno float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ueq: -; GCN: v_cmp_lg_f32 +; Should use v_cmp_lg_f32 define amdgpu_gs void @ueq(float %a) { +; SI-LABEL: ueq: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ueq: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ueq: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ueq: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ueq float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ugt: -; GCN: v_cmp_ge_f32 +; Should use v_cmp_ge_f32 define amdgpu_gs void @ugt(float %a) { +; SI-LABEL: ugt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: 
s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ugt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ugt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ugt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ugt float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}uge: -; GCN: v_cmp_gt_f32_e32 vcc, -1.0 +; Should use v_cmp_gt_f32_e32 vcc, -1.0 define amdgpu_gs void @uge(float %a) { +; SI-LABEL: uge: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: uge: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: uge: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: uge: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, -1.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp uge float %a, -1.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ult: -; GCN: v_cmp_le_f32_e32 vcc, -2.0 +; Should use v_cmp_le_f32_e32 vcc, -2.0 define amdgpu_gs void @ult(float %a) { +; SI-LABEL: ult: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_le_f32_e32 vcc, 
-2.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ult: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ult: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ult: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nle_f32_e32 vcc, -2.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ult float %a, -2.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}ule: -; GCN: v_cmp_lt_f32_e32 vcc, 2.0 +; Should use v_cmp_lt_f32_e32 vcc, 2.0 define amdgpu_gs void @ule(float %a) { +; SI-LABEL: ule: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: ule: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: ule: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: ule: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_nlt_f32_e32 vcc, 2.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp ule float %a, 2.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}une: -; GCN: v_cmp_eq_f32_e32 vcc, 0 +; Should use v_cmp_eq_f32_e32 vcc, 0 define amdgpu_gs void @une(float %a) { +; 
SI-LABEL: une: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: une: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: une: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: une: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp une float %a, 0.0 call void @llvm.amdgcn.kill(i1 %c1) call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0) ret void } -; GCN-LABEL: {{^}}neg_olt: -; GCN: v_cmp_gt_f32_e32 vcc, 1.0 +; Should use v_cmp_gt_f32_e32 vcc, 1.0 define amdgpu_gs void @neg_olt(float %a) { +; SI-LABEL: neg_olt: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_mov_b32 m0, 0 +; SI-NEXT: s_nop 0 +; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; SI-NEXT: s_endpgm +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: neg_olt: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_mov_b32 m0, 0 +; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP) +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: neg_olt: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_mov_b32 m0, 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: neg_olt: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, 1.0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_mov_b32 m0, 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: s_endpgm %c1 = fcmp olt float %a, 1.0 %c2 = xor i1 %c1, 1 call void @llvm.amdgcn.kill(i1 %c2) @@ -219,13 +1151,61 @@ define amdgpu_gs void @neg_olt(float %a) { ret void } -; GCN-LABEL: {{^}}fcmp_x2: 
; FIXME: LLVM should be able to combine these fcmp opcodes. -; SI: v_cmp_lt_f32_e32 vcc, s{{[0-9]+}}, v0 -; GFX10: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 -; GCN: v_cndmask_b32 -; GCN: v_cmp_nle_f32 define amdgpu_ps void @fcmp_x2(float %a) #0 { +; SI-LABEL: fcmp_x2: +; SI: ; %bb.0: +; SI-NEXT: s_mov_b32 s0, 0x3e800000 +; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB21_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB21_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: fcmp_x2: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB21_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: fcmp_x2: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB21_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: fcmp_x2: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0 +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc +; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[2:3] +; GFX12-NEXT: s_cbranch_scc0 .LBB21_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB21_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %ogt = fcmp nsz ogt float %a, 2.500000e-01 %k = select i1 %ogt, float -1.000000e+00, float 0.000000e+00 %c = fcmp nsz oge float %k, 0.000000e+00 @@ -234,14 +1214,78 @@ define amdgpu_ps void @fcmp_x2(float %a) #0 { } ; Note: an almost identical test for this exists in llvm.amdgcn.wqm.vote.ll -; GCN-LABEL: {{^}}wqm: -; GCN: v_cmp_neq_f32_e32 vcc, 0 -; GCN-DAG: s_wqm_b64 s[2:3], vcc -; GCN-DAG: s_mov_b64 s[0:1], exec -; GCN: s_and{{n2|_not1}}_b64 s[2:3], exec, s[2:3] -; GCN: s_and{{n2|_not1}}_b64 s[0:1], s[0:1], s[2:3] -; GCN: s_and_b64 exec, exec, s[0:1] define amdgpu_ps float @wqm(float %a) { +; SI-LABEL: wqm: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; SI-NEXT: s_wqm_b64 s[2:3], vcc +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_andn2_b64 s[2:3], exec, s[2:3] +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; SI-NEXT: s_cbranch_scc0 .LBB22_2 +; SI-NEXT: ; %bb.1: +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: v_mov_b32_e32 v0, 0 +; SI-NEXT: s_branch .LBB22_3 +; SI-NEXT: .LBB22_2: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB22_3: +; +; GFX10-LABEL: wqm: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_wqm_b64 s[2:3], vcc +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[2:3] +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3] +; GFX10-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX10-NEXT: ; %bb.1: +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, 0 
+; GFX10-NEXT: s_branch .LBB22_3 +; GFX10-NEXT: .LBB22_2: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB22_3: +; +; GFX11-LABEL: wqm: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX11-NEXT: s_mov_b64 s[0:1], exec +; GFX11-NEXT: s_wqm_b64 s[2:3], vcc +; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3] +; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX11-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX11-NEXT: ; %bb.1: +; GFX11-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-NEXT: s_branch .LBB22_3 +; GFX11-NEXT: .LBB22_2: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB22_3: +; +; GFX12-LABEL: wqm: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0 +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_wqm_b64 s[2:3], vcc +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3] +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT: s_cbranch_scc0 .LBB22_2 +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: v_mov_b32_e32 v0, 0 +; GFX12-NEXT: s_branch .LBB22_3 +; GFX12-NEXT: .LBB22_2: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB22_3: %c1 = fcmp une float %a, 0.0 %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1) call void @llvm.amdgcn.kill(i1 %c2) @@ -249,28 +1293,212 @@ define amdgpu_ps float @wqm(float %a) { } ; This checks that we use the 64-bit encoding when the operand is a SGPR. -; GCN-LABEL: {{^}}test_sgpr: -; GCN: v_cmp_nle_f32_e64 define amdgpu_ps void @test_sgpr(float inreg %a) #0 { +; SI-LABEL: test_sgpr: +; SI: ; %bb.0: +; SI-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; SI-NEXT: s_andn2_b64 exec, exec, vcc +; SI-NEXT: s_cbranch_scc0 .LBB23_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB23_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_sgpr: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; GFX10-NEXT: s_andn2_b64 exec, exec, vcc +; GFX10-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB23_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_sgpr: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0 +; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc +; GFX11-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB23_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_sgpr: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_cmp_le_f32 s0, 1.0 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB23_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB23_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %c = fcmp ole float %a, 1.000000e+00 call void @llvm.amdgcn.kill(i1 %c) #1 ret void } -; GCN-LABEL: {{^}}test_non_inline_imm_sgpr: -; GCN-NOT: v_cmp_le_f32_e64 define amdgpu_ps void @test_non_inline_imm_sgpr(float inreg %a) #0 { +; SI-LABEL: test_non_inline_imm_sgpr: +; SI: ; %bb.0: +; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; SI-NEXT: v_cmp_le_f32_e32 vcc, s0, v0 +; SI-NEXT: s_andn2_b64 s[0:1], exec, vcc +; 
SI-NEXT: s_andn2_b64 s[2:3], exec, s[0:1] +; SI-NEXT: s_cbranch_scc0 .LBB24_1 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB24_1: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_non_inline_imm_sgpr: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0 +; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1] +; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[0:1] +; GFX10-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB24_1: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_non_inline_imm_sgpr: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0 +; GFX11-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX11-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB24_1: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_non_inline_imm_sgpr: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_cmp_le_f32 s0, 0x3fc00000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1] +; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1] +; GFX12-NEXT: s_cbranch_scc0 .LBB24_1 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB24_1: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm %c = fcmp ole float %a, 1.500000e+00 call void @llvm.amdgcn.kill(i1 %c) #1 ret void } -; GCN-LABEL: {{^}}test_scc_liveness: -; GCN: s_cmp -; GCN: s_and_b64 exec -; GCN: s_cmp -; GCN: s_cbranch_scc define amdgpu_ps void @test_scc_liveness() #0 { +; SI-LABEL: test_scc_liveness: +; SI: ; %bb.0: ; %main_body +; SI-NEXT: s_mov_b64 s[0:1], exec +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: .LBB25_1: ; %loop3 +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: s_cmp_gt_i32 s2, 0 +; SI-NEXT: s_cselect_b64 s[4:5], -1, 0 +; SI-NEXT: s_andn2_b64 s[4:5], exec, s[4:5] +; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5] +; SI-NEXT: s_cbranch_scc0 .LBB25_4 +; SI-NEXT: ; %bb.2: ; %loop3 +; SI-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; SI-NEXT: s_and_b64 exec, exec, s[0:1] +; SI-NEXT: s_add_i32 s3, s2, 1 +; SI-NEXT: s_cmp_lt_i32 s2, 1 +; SI-NEXT: s_mov_b32 s2, s3 +; SI-NEXT: s_cbranch_scc1 .LBB25_1 +; SI-NEXT: ; %bb.3: ; %endloop15 +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB25_4: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: test_scc_liveness: +; GFX10: ; %bb.0: ; %main_body +; GFX10-NEXT: s_mov_b64 s[0:1], exec +; GFX10-NEXT: s_mov_b32 s2, 0 +; GFX10-NEXT: .LBB25_1: ; %loop3 +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_cmp_gt_i32 s2, 0 +; GFX10-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX10-NEXT: s_andn2_b64 s[4:5], exec, s[4:5] +; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5] +; GFX10-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX10-NEXT: ; %bb.2: ; %loop3 +; GFX10-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX10-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_add_i32 s3, s2, 1 +; GFX10-NEXT: s_cmp_lt_i32 s2, 1 +; GFX10-NEXT: s_mov_b32 s2, s3 +; GFX10-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX10-NEXT: ; %bb.3: ; %endloop15 +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB25_4: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: test_scc_liveness: +; GFX11: ; %bb.0: ; %main_body +; GFX11-NEXT: s_mov_b64 
s[0:1], exec +; GFX11-NEXT: s_mov_b32 s2, 0 +; GFX11-NEXT: .LBB25_1: ; %loop3 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_cmp_gt_i32 s2, 0 +; GFX11-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX11-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5] +; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5] +; GFX11-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX11-NEXT: ; %bb.2: ; %loop3 +; GFX11-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX11-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_add_i32 s3, s2, 1 +; GFX11-NEXT: s_cmp_lt_i32 s2, 1 +; GFX11-NEXT: s_mov_b32 s2, s3 +; GFX11-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX11-NEXT: ; %bb.3: ; %endloop15 +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB25_4: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_scc_liveness: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: s_mov_b64 s[0:1], exec +; GFX12-NEXT: s_mov_b32 s2, 0 +; GFX12-NEXT: .LBB25_1: ; %loop3 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_cmp_gt_i32 s2, 0 +; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX12-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5] +; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5] +; GFX12-NEXT: s_cbranch_scc0 .LBB25_4 +; GFX12-NEXT: ; %bb.2: ; %loop3 +; GFX12-NEXT: ; in Loop: Header=BB25_1 Depth=1 +; GFX12-NEXT: s_and_b64 exec, exec, s[0:1] +; GFX12-NEXT: s_add_co_i32 s3, s2, 1 +; GFX12-NEXT: s_cmp_lt_i32 s2, 1 +; GFX12-NEXT: s_mov_b32 s2, s3 +; GFX12-NEXT: s_cbranch_scc1 .LBB25_1 +; GFX12-NEXT: ; %bb.3: ; %endloop15 +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB25_4: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm main_body: br label %loop3 @@ -287,11 +1515,139 @@ endloop15: ; preds = %loop3 ; Check this compiles. ; If kill is marked as defining VCC then this will fail with live interval issues. 
-; GCN-LABEL: {{^}}kill_with_loop_exit: -; GCN: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec -; GCN: s_and{{n2|_not1}}_b64 [[LIVE]], [[LIVE]], exec -; GCN-NEXT: s_cbranch_scc0 define amdgpu_ps void @kill_with_loop_exit(float inreg %inp0, float inreg %inp1, <4 x i32> inreg %inp2, float inreg %inp3) { +; SI-LABEL: kill_with_loop_exit: +; SI: ; %bb.0: ; %.entry +; SI-NEXT: v_mov_b32_e32 v0, 0x43000000 +; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0 +; SI-NEXT: v_cmp_lt_f32_e64 s[0:1], s1, v0 +; SI-NEXT: s_and_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: s_and_b64 vcc, exec, s[0:1] +; SI-NEXT: v_mov_b32_e32 v0, 1.0 +; SI-NEXT: s_cbranch_vccnz .LBB26_5 +; SI-NEXT: ; %bb.1: ; %.preheader1.preheader +; SI-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; SI-NEXT: s_mov_b64 s[2:3], exec +; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; SI-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; SI-NEXT: .LBB26_2: ; %bb +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: s_and_b64 vcc, exec, s[0:1] +; SI-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; SI-NEXT: s_cbranch_vccnz .LBB26_2 +; SI-NEXT: ; %bb.3: ; %bb33 +; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], exec +; SI-NEXT: s_cbranch_scc0 .LBB26_6 +; SI-NEXT: ; %bb.4: ; %bb33 +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: .LBB26_5: ; %bb35 +; SI-NEXT: exp mrt0 v0, v0, v0, v0 done vm +; SI-NEXT: s_endpgm +; SI-NEXT: .LBB26_6: +; SI-NEXT: s_mov_b64 exec, 0 +; SI-NEXT: exp null off, off, off, off done vm +; SI-NEXT: s_endpgm +; +; GFX10-LABEL: kill_with_loop_exit: +; GFX10: ; %bb.0: ; %.entry +; GFX10-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0 +; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1 +; GFX10-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX10-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX10-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX10-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX10-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; GFX10-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; GFX10-NEXT: s_mov_b64 s[2:3], exec +; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; GFX10-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; GFX10-NEXT: .LBB26_2: ; %bb +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX10-NEXT: s_cbranch_vccnz .LBB26_2 +; GFX10-NEXT: ; %bb.3: ; %bb33 +; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], exec +; GFX10-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX10-NEXT: ; %bb.4: ; %bb33 +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: .LBB26_5: ; %bb35 +; GFX10-NEXT: exp mrt0 v0, v0, v0, v0 done vm +; GFX10-NEXT: s_endpgm +; GFX10-NEXT: .LBB26_6: +; GFX10-NEXT: s_mov_b64 exec, 0 +; GFX10-NEXT: exp null off, off, off, off done vm +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: kill_with_loop_exit: +; GFX11: ; %bb.0: ; %.entry +; GFX11-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0 +; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1 +; GFX11-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX11-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX11-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX11-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX11-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0 +; GFX11-NEXT: v_mov_b32_e32 v0, 0x3fc00000 +; GFX11-NEXT: s_mov_b64 s[2:3], exec +; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1] +; GFX11-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1 +; GFX11-NEXT: .LBB26_2: ; %bb +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: v_add_f32_e32 v0, 0x3e800000, v0 +; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX11-NEXT: 
s_cbranch_vccnz .LBB26_2 +; GFX11-NEXT: ; %bb.3: ; %bb33 +; GFX11-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec +; GFX11-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX11-NEXT: ; %bb.4: ; %bb33 +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: .LBB26_5: ; %bb35 +; GFX11-NEXT: exp mrt0 v0, v0, v0, v0 done +; GFX11-NEXT: s_endpgm +; GFX11-NEXT: .LBB26_6: +; GFX11-NEXT: s_mov_b64 exec, 0 +; GFX11-NEXT: exp mrt0 off, off, off, off done +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: kill_with_loop_exit: +; GFX12: ; %bb.0: ; %.entry +; GFX12-NEXT: s_cmp_lt_f32 s0, 0x43000000 +; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX12-NEXT: s_cmp_lt_f32 s1, 0x43000000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1] +; GFX12-NEXT: s_mov_b32 s4, 1.0 +; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX12-NEXT: s_cbranch_vccnz .LBB26_5 +; GFX12-NEXT: ; %bb.1: ; %.preheader1.preheader +; GFX12-NEXT: s_cmp_ngt_f32 s6, 0 +; GFX12-NEXT: s_mov_b64 s[2:3], exec +; GFX12-NEXT: s_mov_b32 s4, 0x3fc00000 +; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1] +; GFX12-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0 +; GFX12-NEXT: .LBB26_2: ; %bb +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_add_f32 s4, s4, 0x3e800000 +; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1] +; GFX12-NEXT: s_cbranch_vccnz .LBB26_2 +; GFX12-NEXT: ; %bb.3: ; %bb33 +; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec +; GFX12-NEXT: s_cbranch_scc0 .LBB26_6 +; GFX12-NEXT: ; %bb.4: ; %bb33 +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: .LBB26_5: ; %bb35 +; GFX12-NEXT: v_mov_b32_e32 v0, s4 +; GFX12-NEXT: export mrt0 v0, v0, v0, v0 done +; GFX12-NEXT: s_endpgm +; GFX12-NEXT: .LBB26_6: +; GFX12-NEXT: s_mov_b64 exec, 0 +; GFX12-NEXT: export mrt0 off, off, off, off done +; GFX12-NEXT: s_endpgm .entry: %tmp24 = fcmp olt float %inp0, 1.280000e+02 %tmp25 = fcmp olt float %inp1, 1.280000e+02 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll index 7a20b5c..a2c1545 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll @@ -1,27 +1,52 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 
-new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 define amdgpu_kernel void @raw_atomic_buffer_load_i32(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB0_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB0_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB0_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB0_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB0_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -34,23 +59,42 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_i32_off(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i32_off: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB1_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 
s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB1_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i32_off: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB1_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i32_off: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB1_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB1_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -62,23 +106,43 @@ bb2: ret void } define amdgpu_kernel void @raw_atomic_buffer_load_i32_soff(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i32_soff: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB2_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB2_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i32_soff: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB2_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i32_soff: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_mov_b32 s5, 4 +; GFX12-NEXT: .LBB2_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 
0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], s5 offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB2_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -90,23 +154,42 @@ bb2: ret void } define amdgpu_kernel void @raw_atomic_buffer_load_i32_dlc(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i32_dlc: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB3_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB3_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i32_dlc: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB3_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i32_dlc: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB3_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT_RT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB3_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -119,24 +202,44 @@ bb2: } define amdgpu_kernel void @raw_nonatomic_buffer_load_i32(<4 x i32> %addr) { -; CHECK-LABEL: raw_nonatomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: .LBB4_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; CHECK-NEXT: s_or_b32 s0, s1, s0 -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; 
CHECK-NEXT: s_cbranch_execnz .LBB4_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_nonatomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_mov_b32 s0, 0 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: .LBB4_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_or_b32 s0, s1, s0 +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_nonatomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s0, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: .LBB4_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_or_b32 s0, s1, s0 +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_execnz .LBB4_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -149,23 +252,43 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_i64(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i64: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB5_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB5_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i64: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB5_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i64: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 
0 +; GFX12-NEXT: .LBB5_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB5_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() %id.zext = zext i32 %id to i64 @@ -179,23 +302,42 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_v2i16(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_v2i16: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB6_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB6_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_v2i16: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB6_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_v2i16: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB6_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB6_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -209,68 +351,151 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) { -; CHECK-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: -; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb -; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 -; CHECK-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 -; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; 
CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 -; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-SDAG-TRUE16-NEXT: s_endpgm +; GFX11-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-SDAG-TRUE16-NEXT: s_endpgm ; -; CHECK-FAKE16-LABEL: raw_atomic_buffer_load_v4i16: -; CHECK-FAKE16: ; %bb.0: ; %bb -; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0 -; CHECK-FAKE16-NEXT: .LBB7_1: ; %bb1 -; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 -; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-FAKE16-NEXT: s_endpgm +; GFX11-FAKE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX11-FAKE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-FAKE16-NEXT: 
s_cbranch_execnz .LBB7_1 +; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-FAKE16-NEXT: s_endpgm ; -; CHECK-GISEL-LABEL: raw_atomic_buffer_load_v4i16: -; CHECK-GISEL: ; %bb.0: ; %bb -; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-GISEL-NEXT: s_mov_b32 s4, 0 -; CHECK-GISEL-NEXT: .LBB7_1: ; %bb1 -; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v1 -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v2 -; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 -; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2 -; CHECK-GISEL-NEXT: s_endpgm +; GFX11-GISEL-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l +; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-TRUE16-NEXT: s_endpgm +; +; GFX11-GISEL-LABEL: raw_atomic_buffer_load_v4i16: +; GFX11-GISEL: ; %bb.0: ; %bb +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-NEXT: .LBB7_1: ; %bb1 +; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v1 +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v2 +; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 +; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 +; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-NEXT: s_endpgm +; +; GFX12-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: 
buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-SDAG-TRUE16-NEXT: s_endpgm +; +; GFX12-FAKE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX12-FAKE16: ; %bb.0: ; %bb +; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0 +; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX12-FAKE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-FAKE16-NEXT: s_endpgm +; +; GFX12-GISEL-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-GISEL-TRUE16-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -285,23 +510,42 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_v4i32(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_v4i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB8_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: 
s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_v4i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB8_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_v4i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB8_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b128 v[2:5], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -315,25 +559,46 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_ptr(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_ptr: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB9_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_load_b32 v1, v[1:2] -; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB9_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_ptr: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB9_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: flat_load_b32 v1, v[1:2] +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB9_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_ptr: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB9_1: ; %bb1 +; GFX12-NEXT: 
; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: flat_load_b32 v1, v[2:3] +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB9_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll index 5c0e34c..d51e912 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll @@ -1,58 +1,95 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=CHECK %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX9 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX12 %s define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen offset:24 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen offset:24 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_nc_u32_e32 v1, 24, v1 +; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen +; GFX12-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 24 %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void } define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_add_f32 v0, off, s[16:19], s20 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_add_f32 v0, off, s[16:19], s20 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: 
buffer_atomic_add_f32 v0, off, s[0:3], s16 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0) ret void } define void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[16:19], s20 offen -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[16:19], s20 offen +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen +; GFX12-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } define void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_pk_add_f16 v0, off, s[16:19], s20 offset:92 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_pk_add_f16 v0, off, s[16:19], s20 offset:92 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 92, i32 %soffset, i32 0) ret void } define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen slc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen slc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: 
raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT +; GFX12-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 2) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll index 8a6594f..1a1a1f7 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll @@ -6,6 +6,7 @@ ; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefixes=GFX910,GFX10 ; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX11 ; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12 +; RUN: llc -mcpu=gfx1250 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12 define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) { ; GFX67-LABEL: raw_buffer_load_i8_tfe: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll index 89511de..eeea1456 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll @@ -3,6 +3,7 @@ ; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck -check-prefixes=GFX68,GFX8 %s ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 | FileCheck -check-prefixes=GFX11 %s ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12 %s +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 | FileCheck -check-prefixes=GFX12 %s define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <4 x float>) { ; GFX68-LABEL: buffer_store: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll index 561ec7d..6f7c001 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll @@ -1,27 +1,52 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 | FileCheck %s 
-check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 define amdgpu_kernel void @raw_ptr_atomic_buffer_ptr_load_i32(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_ptr_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB0_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB0_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_ptr_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB0_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_ptr_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB0_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB0_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -34,23 +59,42 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_off: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB1_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt 
lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB1_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_off: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB1_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_off: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB1_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB1_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -62,23 +106,43 @@ bb2: ret void } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_soff: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB2_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB2_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_soff: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB2_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_soff: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; 
GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_mov_b32 s5, 4 +; GFX12-NEXT: .LBB2_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], s5 offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB2_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -90,23 +154,42 @@ bb2: ret void } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_dlc: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB3_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB3_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_dlc: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB3_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_dlc: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB3_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT_RT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB3_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -119,24 +202,44 @@ bb2: } define amdgpu_kernel void @raw_nonptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_nonptr_atomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: .LBB4_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: 
Depth=1 -; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; CHECK-NEXT: s_or_b32 s0, s1, s0 -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; CHECK-NEXT: s_cbranch_execnz .LBB4_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_nonptr_atomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_mov_b32 s0, 0 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: .LBB4_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_or_b32 s0, s1, s0 +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_nonptr_atomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s0, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: .LBB4_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_or_b32 s0, s1, s0 +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_execnz .LBB4_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -149,23 +252,43 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_i64: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB5_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB5_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_i64: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB5_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm 
+; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_i64: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB5_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB5_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() %id.zext = zext i32 %id to i64 @@ -179,23 +302,42 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_v2i16: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB6_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB6_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_v2i16: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB6_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_v2i16: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB6_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB6_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -209,68 +351,151 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr) { -; CHECK-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: -; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb -; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 -; CHECK-SDAG-TRUE16-NEXT: .LBB7_1: ; 
%bb1 -; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 -; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-SDAG-TRUE16-NEXT: s_endpgm +; GFX11-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-SDAG-TRUE16-NEXT: s_endpgm ; -; CHECK-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: -; CHECK-FAKE16: ; %bb.0: ; %bb -; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0 -; CHECK-FAKE16-NEXT: .LBB7_1: ; %bb1 -; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 -; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-FAKE16-NEXT: s_endpgm +; GFX11-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX11-FAKE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-FAKE16-NEXT: s_endpgm ; -; CHECK-GISEL-LABEL: raw_ptr_atomic_buffer_load_v4i16: -; CHECK-GISEL: ; %bb.0: ; %bb -; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-GISEL-NEXT: s_mov_b32 s4, 0 -; CHECK-GISEL-NEXT: .LBB7_1: ; %bb1 -; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v1 -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v2 -; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 -; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2 -; CHECK-GISEL-NEXT: s_endpgm +; GFX11-GISEL-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l +; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-TRUE16-NEXT: s_endpgm +; +; GFX11-GISEL-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX11-GISEL: ; %bb.0: ; %bb +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-NEXT: .LBB7_1: ; %bb1 +; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v1 +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v2 +; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 +; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 +; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-NEXT: s_endpgm +; +; GFX12-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 
0x24 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-SDAG-TRUE16-NEXT: s_endpgm +; +; GFX12-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX12-FAKE16: ; %bb.0: ; %bb +; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0 +; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX12-FAKE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-FAKE16-NEXT: s_endpgm +; +; GFX12-GISEL-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-GISEL-TRUE16-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -285,23 +510,42 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_v4i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB8_1: ; %bb1 -; 
CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_v4i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB8_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_v4i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB8_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b128 v[2:5], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -315,25 +559,46 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_ptr: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB9_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_load_b32 v1, v[1:2] -; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB9_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_ptr: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB9_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: flat_load_b32 v1, v[1:2] +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; 
GFX11-NEXT: s_cbranch_execnz .LBB9_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_ptr: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB9_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: flat_load_b32 v1, v[2:3] +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB9_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll index 8b6ba1a..2c3b521 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll @@ -1,104 +1,174 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; FIXME: Test 90a, 940. 908 should fail to select. -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v1 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 128 %ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0) 
ret <2 x bfloat> %ret } define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 2) ret <2 x bfloat> %ret } define void @raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v1 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 128 %unused = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void } define void @raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { -; GFX12-LABEL: 
raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %unused = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 2) ret void } ; Test waterfall loop define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset) #0 { -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_mov_b32 s2, exec_lo -; GFX12-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: v_readfirstlane_b32 s4, v1 -; GFX12-NEXT: v_readfirstlane_b32 s5, v2 -; GFX12-NEXT: v_readfirstlane_b32 s6, v3 -; GFX12-NEXT: v_readfirstlane_b32 s7, v4 -; GFX12-NEXT: v_readfirstlane_b32 s3, v6 -; GFX12-NEXT: s_wait_alu 0xf1ff -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] -; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v6 -; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_b32 s0, s0, s1 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_saveexec_b32 s0, s0 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v5, s[4:7], s3 offen offset:128 th:TH_ATOMIC_RETURN -; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 -; GFX12-NEXT: ; implicit-def: $vgpr6 -; GFX12-NEXT: ; implicit-def: $vgpr5 -; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 -; GFX12-NEXT: s_cbranch_execnz .LBB4_1 -; GFX12-NEXT: ; %bb.2: -; GFX12-NEXT: s_mov_b32 exec_lo, s2 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: s_mov_b32 s2, exec_lo +; GFX1200-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1 +; GFX1200-NEXT: v_readfirstlane_b32 s4, v1 +; 
GFX1200-NEXT: v_readfirstlane_b32 s5, v2 +; GFX1200-NEXT: v_readfirstlane_b32 s6, v3 +; GFX1200-NEXT: v_readfirstlane_b32 s7, v4 +; GFX1200-NEXT: v_readfirstlane_b32 s3, v6 +; GFX1200-NEXT: s_wait_alu 0xf1ff +; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] +; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] +; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v6 +; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_b32 s0, s0, s1 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v5, s[4:7], s3 offen offset:128 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 +; GFX1200-NEXT: ; implicit-def: $vgpr6 +; GFX1200-NEXT: ; implicit-def: $vgpr5 +; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1200-NEXT: s_cbranch_execnz .LBB4_1 +; GFX1200-NEXT: ; %bb.2: +; GFX1200-NEXT: s_mov_b32 exec_lo, s2 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v11, v4 :: v_dual_mov_b32 v10, v3 +; GFX1250-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v8, v1 +; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v5 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v8 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v9 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v10 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v11 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v6 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[8:9] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[10:11] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v6 +; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[4:7], s3 offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9_vgpr10_vgpr11 +; GFX1250-NEXT: ; implicit-def: $vgpr6 +; GFX1250-NEXT: ; implicit-def: $vgpr1 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB4_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 128 %ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret <2 x bfloat> %ret diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll index 8141e0d..ea8f836 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll @@ -2,7 
+2,8 @@ ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { ; GFX908-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: @@ -26,15 +27,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voff ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24) ret void } @@ -61,15 +69,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], 
s16 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0) ret void } @@ -96,15 +111,22 @@ define void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_vo ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -131,15 +153,22 @@ define void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffs ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0) ret void } @@ -166,15 +195,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voff ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; 
GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll index 767117d..2838740 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll @@ -1,7 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { ; GFX90A-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: @@ -18,16 +19,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffs ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; 
GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24) ret float %ret } @@ -47,16 +56,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset_ ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0) ret float %ret } @@ -76,16 +93,24 @@ define <2 x half> @raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgp ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <2 x half> %ret } @@ -105,16 +130,24 @@ define <2 x 
half> @raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_v ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0) ret <2 x half> %ret } @@ -134,16 +167,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffs ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2) ret float %ret } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll index 3540468..4dd258b 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll @@ -3,7 +3,8 @@ ; 
RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=GFX8 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck --check-prefix=GFX9 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck --check-prefix=GFX10 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX12 %s define bfloat @raw_ptr_buffer_load_bf16(ptr addrspace(8) inreg %rsrc) { ; GFX7-LABEL: raw_ptr_buffer_load_bf16: @@ -41,6 +42,14 @@ define bfloat @raw_ptr_buffer_load_bf16(ptr addrspace(8) inreg %rsrc) { ; GFX11-NEXT: buffer_load_u16 v0, off, s[0:3], 0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_ptr_buffer_load_bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_u16 v0, off, s[0:3], null +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %val = call bfloat @llvm.amdgcn.raw.ptr.buffer.load.v2bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) ret bfloat %val } @@ -82,6 +91,14 @@ define <2 x bfloat> @raw_ptr_buffer_load_v2bf16(ptr addrspace(8) inreg %rsrc) { ; GFX11-NEXT: buffer_load_b32 v0, off, s[0:3], 0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_ptr_buffer_load_v2bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v0, off, s[0:3], null +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %val = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v2bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) ret <2 x bfloat> %val } @@ -125,6 +142,14 @@ define <4 x bfloat> @raw_ptr_buffer_load_v4bf16(ptr addrspace(8) inreg %rsrc) { ; GFX11-NEXT: buffer_load_b64 v[0:1], off, s[0:3], 0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_ptr_buffer_load_v4bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %val = call <4 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v4bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) ret <4 x bfloat> %val } @@ -178,6 +203,14 @@ define <8 x bfloat> @raw_ptr_buffer_load_v8bf16(ptr addrspace(8) inreg %rsrc) { ; GFX11-NEXT: buffer_load_b128 v[0:3], off, s[0:3], 0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_ptr_buffer_load_v8bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b128 v[0:3], off, s[0:3], null +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %val = call <8 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v8bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) ret <8 x bfloat> %val } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll index e1f84dc..ec7d7d4 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll @@ -3,7 +3,8 @@ ; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=GFX8 %s ; RUN: 
llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck --check-prefix=GFX9 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck --check-prefix=GFX10 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX12 %s define amdgpu_ps void @buffer_store_bf16(ptr addrspace(8) inreg %rsrc, bfloat %data, i32 %offset) { ; GFX7-LABEL: buffer_store_bf16: @@ -32,6 +33,11 @@ define amdgpu_ps void @buffer_store_bf16(ptr addrspace(8) inreg %rsrc, bfloat %d ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b16 v0, v1, s[0:3], null offen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.raw.ptr.buffer.store.bf16(bfloat %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0) ret void } @@ -65,6 +71,11 @@ define amdgpu_ps void @buffer_store_v2bf16(ptr addrspace(8) inreg %rsrc, <2 x bf ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_v2bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null offen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.raw.ptr.buffer.store.v2bf16(<2 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0) ret void } @@ -102,6 +113,11 @@ define amdgpu_ps void @buffer_store_v4bf16(ptr addrspace(8) inreg %rsrc, <4 x bf ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_v4bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null offen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.raw.ptr.buffer.store.v4bf16(<4 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0) ret void } @@ -153,6 +169,11 @@ define amdgpu_ps void @buffer_store_v8bf16(ptr addrspace(8) inreg %rsrc, <8 x bf ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_v8bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null offen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.raw.ptr.buffer.store.v8bf16(<8 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll index f6f614e..8896364 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll @@ -1,30 +1,58 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16 -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL +; RUN: llc -global-isel=0 
-mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 define amdgpu_kernel void @struct_atomic_buffer_load_i32(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB0_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB0_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB0_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB0_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; 
GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB0_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -37,23 +65,43 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_i32_const_idx(<4 x i32> %addr) { -; CHECK-LABEL: struct_atomic_buffer_load_i32_const_idx: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB1_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB1_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32_const_idx: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB1_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32_const_idx: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 15 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB1_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB1_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -66,26 +114,48 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_i32_off(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i32_off: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB2_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; 
CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB2_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32_off: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB2_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32_off: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB2_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB2_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -98,26 +168,49 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_i32_soff(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i32_soff: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB3_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB3_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32_soff: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB3_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; 
%bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32_soff: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_mov_b32 s5, 4 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB3_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], s5 idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB3_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -129,26 +222,48 @@ bb2: ret void } define amdgpu_kernel void @struct_atomic_buffer_load_i32_dlc(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i32_dlc: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB4_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB4_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32_dlc: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB4_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32_dlc: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB4_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT_RT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB4_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; 
GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -161,26 +276,49 @@ bb2: } define amdgpu_kernel void @struct_nonatomic_buffer_load_i32(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_nonatomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: .LBB5_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; CHECK-NEXT: s_or_b32 s0, s1, s0 -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; CHECK-NEXT: s_cbranch_execnz .LBB5_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_nonatomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_mov_b32 s0, 0 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: .LBB5_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_or_b32 s0, s1, s0 +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_nonatomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s0, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: .LBB5_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_or_b32 s0, s1, s0 +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_execnz .LBB5_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -193,26 +331,49 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_i64(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i64: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v2, s6 -; CHECK-NEXT: .LBB6_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b64 
v[3:4], v2, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1] -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB6_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i64: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v2, s6 +; GFX11-NEXT: .LBB6_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1] +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i64: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v2, s6 +; GFX12-NEXT: .LBB6_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[0:1] +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB6_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() %id.zext = zext i32 %id to i64 @@ -226,26 +387,48 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_v2i16(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_v2i16: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB7_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_v2i16: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB7_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, 
s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_v2i16: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB7_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -259,77 +442,172 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32 %index) { -; CHECK-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: -; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb -; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1 -; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 -; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 -; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-SDAG-TRUE16-NEXT: s_endpgm +; GFX11-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1 +; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 
vcc_lo, v2, v0 +; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-SDAG-TRUE16-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-FAKE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-GISEL-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX11-GISEL-TRUE16-NEXT: s_clause 0x1 +; GFX11-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-TRUE16-NEXT: s_endpgm +; +; GFX11-GISEL-LABEL: struct_atomic_buffer_load_v4i16: +; GFX11-GISEL: ; %bb.0: ; %bb +; GFX11-GISEL-NEXT: s_clause 0x1 +; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-GISEL-NEXT: .LBB8_1: ; %bb1 +; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v2 +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v3 +; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 +; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 +; 
GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-NEXT: s_endpgm ; -; CHECK-FAKE16-LABEL: struct_atomic_buffer_load_v4i16: -; CHECK-FAKE16: ; %bb.0: ; %bb -; CHECK-FAKE16-NEXT: s_clause 0x1 -; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0 -; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1 -; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 -; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-FAKE16-NEXT: s_endpgm +; GFX12-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1 +; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-SDAG-TRUE16-NEXT: s_endpgm ; -; CHECK-GISEL-LABEL: struct_atomic_buffer_load_v4i16: -; CHECK-GISEL: ; %bb.0: ; %bb -; CHECK-GISEL-NEXT: s_clause 0x1 -; CHECK-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-GISEL-NEXT: s_mov_b32 s4, 0 -; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-GISEL-NEXT: .LBB8_1: ; %bb1 -; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v2 -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v3 -; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; CHECK-GISEL-NEXT: 
v_cmp_ne_u32_e32 vcc_lo, s5, v0 -; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2 -; CHECK-GISEL-NEXT: s_endpgm +; GFX12-FAKE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX12-FAKE16: ; %bb.0: ; %bb +; GFX12-FAKE16-NEXT: s_clause 0x1 +; GFX12-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0 +; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-FAKE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-FAKE16-NEXT: s_endpgm +; +; GFX12-GISEL-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX12-GISEL-TRUE16-NEXT: s_clause 0x1 +; GFX12-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-GISEL-TRUE16-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -344,26 +622,48 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_v4i32(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_v4i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB9_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_v4i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB9_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB9_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_v4i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB9_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB9_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -377,28 +677,52 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_ptr(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_ptr: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB10_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_load_b32 v2, v[2:3] -; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB10_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_ptr: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB10_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: flat_load_b32 v2, v[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: 
s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB10_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_ptr: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB10_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: flat_load_b32 v2, v[2:3] +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB10_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll index 13b28d4..9abbc06 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll @@ -6,6 +6,7 @@ ; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefixes=GFX910,GFX10 ; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX11 ; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12 +; RUN: llc -mcpu=gfx1250 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12 define amdgpu_ps void @struct_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) { ; GFX67-LABEL: struct_buffer_load_i8_tfe: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll index 9ce33c6..822016b 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll @@ -3,6 +3,8 @@ ; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck -check-prefixes=GFX68,GFX8 %s ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <4 x float>) { ; GFX68-LABEL: buffer_store: @@ -21,6 +23,15 @@ define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, < ; GFX11-NEXT: buffer_store_b128 v[4:7], v12, s[0:3], 0 idxen glc ; GFX11-NEXT: buffer_store_b128 v[8:11], v12, s[0:3], 0 idxen slc ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_mov_b32_e32 v12, 0 +; GFX12-NEXT: s_clause 0x2 +; GFX12-NEXT: buffer_store_b128 v[0:3], v12, s[0:3], null idxen +; GFX12-NEXT: buffer_store_b128 v[4:7], v12, s[0:3], null idxen th:TH_STORE_NT +; GFX12-NEXT: 
buffer_store_b128 v[8:11], v12, s[0:3], null idxen th:TH_STORE_HT +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 0) call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 1) @@ -40,6 +51,12 @@ define amdgpu_ps void @buffer_store_immoffs(<4 x i32> inreg, <4 x float>) { ; GFX11-NEXT: v_mov_b32_e32 v4, 0 ; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 idxen offset:42 ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_immoffs: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_mov_b32_e32 v4, 0 +; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen offset:42 +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 42, i32 0, i32 0) ret void @@ -55,6 +72,11 @@ define amdgpu_ps void @buffer_store_idx(<4 x i32> inreg, <4 x float>, i32) { ; GFX11: ; %bb.0: ; %main_body ; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_idx: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i32 0, i32 0) ret void @@ -76,6 +98,12 @@ define amdgpu_ps void @buffer_store_ofs(<4 x i32> inreg, <4 x float>, i32) { ; GFX11-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, s4 ; GFX11-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], 0 idxen offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_ofs: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, 0 +; GFX12-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], null idxen offen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 %2, i32 0, i32 0) ret void @@ -91,6 +119,11 @@ define amdgpu_ps void @buffer_store_both(<4 x i32> inreg, <4 x float>, i32, i32) ; GFX11: ; %bb.0: ; %main_body ; GFX11-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], 0 idxen offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_both: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], null idxen offen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 %3, i32 0, i32 0) ret void @@ -108,6 +141,12 @@ define amdgpu_ps void @buffer_store_both_reversed(<4 x i32> inreg, <4 x float>, ; GFX11-NEXT: v_mov_b32_e32 v6, v4 ; GFX11-NEXT: buffer_store_b128 v[0:3], v[5:6], s[0:3], 0 idxen offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_both_reversed: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_dual_mov_b32 v6, v5 :: v_dual_mov_b32 v7, v4 +; GFX12-NEXT: buffer_store_b128 v[0:3], v[6:7], s[0:3], null idxen offen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %3, i32 %2, i32 0, i32 0) ret void @@ -139,6 +178,15 @@ define amdgpu_ps void @buffer_store_wait(<4 x i32> inreg, <4 x float>, i32, i32, ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_store_b128 v[0:3], v6, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_wait: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen +; GFX12-NEXT: buffer_load_b128 v[0:3], v5, s[0:3], null idxen +; GFX12-NEXT: 
s_wait_loadcnt 0x0 +; GFX12-NEXT: buffer_store_b128 v[0:3], v6, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i32 0, i32 0) %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %0, i32 %3, i32 0, i32 0, i32 0) @@ -156,6 +204,11 @@ define amdgpu_ps void @buffer_store_x1(<4 x i32> inreg %rsrc, float %data, i32 % ; GFX11: ; %bb.0: ; %main_body ; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_x1: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.f32(float %data, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void @@ -171,6 +224,11 @@ define amdgpu_ps void @buffer_store_x2(<4 x i32> inreg %rsrc, <2 x float> %data, ; GFX11: ; %bb.0: ; %main_body ; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_x2: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %data, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void @@ -193,6 +251,15 @@ define amdgpu_ps void @buffer_store_int(<4 x i32> inreg, <4 x i32>, <2 x i32>, i ; GFX11-NEXT: buffer_store_b64 v[4:5], v7, s[0:3], 0 idxen glc ; GFX11-NEXT: buffer_store_b32 v6, v7, s[0:3], 0 idxen slc ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_int: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_mov_b32_e32 v7, 0 +; GFX12-NEXT: s_clause 0x2 +; GFX12-NEXT: buffer_store_b128 v[0:3], v7, s[0:3], null idxen +; GFX12-NEXT: buffer_store_b64 v[4:5], v7, s[0:3], null idxen th:TH_STORE_NT +; GFX12-NEXT: buffer_store_b32 v6, v7, s[0:3], null idxen th:TH_STORE_HT +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4i32(<4 x i32> %1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 0) call void @llvm.amdgcn.struct.buffer.store.v2i32(<2 x i32> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 1) @@ -212,6 +279,12 @@ define amdgpu_ps void @struct_buffer_store_byte(<4 x i32> inreg %rsrc, float %v1 ; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX11-NEXT: buffer_store_b8 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_byte: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX12-NEXT: buffer_store_b8 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: %v2 = fptoui float %v1 to i32 %v3 = trunc i32 %v2 to i8 @@ -237,6 +310,18 @@ define amdgpu_ps void @struct_buffer_store_f16(<4 x i32> inreg %rsrc, float %v1, ; GFX11-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0 ; GFX11-FAKE16-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 idxen ; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX12-TRUE16-LABEL: struct_buffer_store_f16: +; GFX12-TRUE16: ; %bb.0: +; GFX12-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 +; GFX12-TRUE16-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen +; GFX12-TRUE16-NEXT: s_endpgm +; +; GFX12-FAKE16-LABEL: struct_buffer_store_f16: +; GFX12-FAKE16: ; %bb.0: +; GFX12-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-FAKE16-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen +; GFX12-FAKE16-NEXT: s_endpgm %v2 = fptrunc float %v1 to half call void @llvm.amdgcn.struct.buffer.store.f16(half %v2, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void @@ -261,6 +346,11 @@ define amdgpu_ps void 
@struct_buffer_store_v2f16(<4 x i32> inreg %rsrc, <2 x hal ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_v2f16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void } @@ -288,6 +378,11 @@ define amdgpu_ps void @struct_buffer_store_v4f16(<4 x i32> inreg %rsrc, <4 x hal ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_v4f16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void } @@ -304,6 +399,12 @@ define amdgpu_ps void @struct_buffer_store_i16(<4 x i32> inreg %rsrc, float %v1, ; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX11-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_i16: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX12-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: %v2 = fptoui float %v1 to i32 %v3 = trunc i32 %v2 to i16 @@ -329,6 +430,11 @@ define amdgpu_ps void @struct_buffer_store_vif16(<4 x i32> inreg %rsrc, <2 x i16 ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_vif16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.struct.buffer.store.v2i16(<2 x i16> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void } @@ -354,6 +460,11 @@ define amdgpu_ps void @struct_buffer_store_v4i16(<4 x i32> inreg %rsrc, <4 x i16 ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_v4i16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.struct.buffer.store.v4i16(<4 x i16> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll index 8f33dd6..23db247 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll @@ -1,30 +1,58 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16 -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16 +; RUN: llc -global-isel=0 -mtriple=amdgcn 
-mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB0_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB0_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB0_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB0_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: 
s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB0_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -37,23 +65,43 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_const_idx(ptr addrspace(8) %ptr) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB1_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB1_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB1_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 15 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB1_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB1_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -66,26 +114,48 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_off: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB2_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB2_1 -; 
CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_off: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB2_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_off: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB2_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB2_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -98,26 +168,49 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_soff: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB3_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB3_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_soff: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB3_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: 
struct_ptr_atomic_buffer_load_i32_soff: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_mov_b32 s5, 4 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB3_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], s5 idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB3_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -129,26 +222,48 @@ bb2: ret void } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_dlc: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB4_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB4_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_dlc: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB4_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_dlc: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB4_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT_RT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB4_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id 
= tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -161,26 +276,49 @@ bb2: } define amdgpu_kernel void @struct_ptr_nonatomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_nonatomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: .LBB5_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; CHECK-NEXT: s_or_b32 s0, s1, s0 -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; CHECK-NEXT: s_cbranch_execnz .LBB5_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_nonatomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_mov_b32 s0, 0 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: .LBB5_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_or_b32 s0, s1, s0 +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_nonatomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s0, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: .LBB5_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_or_b32 s0, s1, s0 +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_execnz .LBB5_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -193,26 +331,49 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i64: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v2, s6 -; CHECK-NEXT: .LBB6_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: 
buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1] -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB6_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i64: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v2, s6 +; GFX11-NEXT: .LBB6_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1] +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i64: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v2, s6 +; GFX12-NEXT: .LBB6_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[0:1] +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB6_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() %id.zext = zext i32 %id to i64 @@ -226,26 +387,48 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_v2i16: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB7_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_v2i16: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB7_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: 
Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_v2i16: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB7_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -259,77 +442,172 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: -; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb -; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1 -; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 -; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 -; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-SDAG-TRUE16-NEXT: s_endpgm +; GFX11-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: +; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1 +; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-SDAG-TRUE16-NEXT: 
v_lshl_or_b32 v2, v3, 16, v2 +; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-SDAG-TRUE16-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-FAKE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: +; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX11-GISEL-TRUE16-NEXT: s_clause 0x1 +; GFX11-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-TRUE16-NEXT: s_endpgm +; +; GFX11-GISEL-LABEL: struct_ptr_atomic_buffer_load_v4i16: +; GFX11-GISEL: ; %bb.0: ; %bb +; GFX11-GISEL-NEXT: s_clause 0x1 +; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-GISEL-NEXT: .LBB8_1: ; %bb1 +; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v2 +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v3 +; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 +; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | 
instid1(SALU_CYCLE_1) +; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 +; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-NEXT: s_endpgm ; -; CHECK-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: -; CHECK-FAKE16: ; %bb.0: ; %bb -; CHECK-FAKE16-NEXT: s_clause 0x1 -; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0 -; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1 -; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 -; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-FAKE16-NEXT: s_endpgm +; GFX12-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: +; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1 +; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-SDAG-TRUE16-NEXT: s_endpgm ; -; CHECK-GISEL-LABEL: struct_ptr_atomic_buffer_load_v4i16: -; CHECK-GISEL: ; %bb.0: ; %bb -; CHECK-GISEL-NEXT: s_clause 0x1 -; CHECK-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-GISEL-NEXT: s_mov_b32 s4, 0 -; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-GISEL-NEXT: .LBB8_1: ; %bb1 -; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v2 -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v3 -; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; CHECK-GISEL-NEXT: s_delay_alu 
instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 -; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2 -; CHECK-GISEL-NEXT: s_endpgm +; GFX12-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: +; GFX12-FAKE16: ; %bb.0: ; %bb +; GFX12-FAKE16-NEXT: s_clause 0x1 +; GFX12-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0 +; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-FAKE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-FAKE16-NEXT: s_endpgm +; +; GFX12-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: +; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX12-GISEL-TRUE16-NEXT: s_clause 0x1 +; GFX12-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-GISEL-TRUE16-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -344,26 +622,48 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_v4i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB9_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 -; CHECK-NEXT: s_or_b32 s4, 
vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB9_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_v4i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB9_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB9_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_v4i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB9_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB9_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -377,28 +677,52 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_ptr: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB10_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_load_b32 v2, v[2:3] -; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB10_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_ptr: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB10_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: flat_load_b32 v2, v[2:3] +; GFX11-NEXT: 
s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB10_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_ptr: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB10_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: flat_load_b32 v2, v[2:3] +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB10_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll index 746b879..4366472 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll @@ -3,6 +3,7 @@ ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) #0 { ; GFX908-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: @@ -39,6 +40,14 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX1200-NEXT: s_wait_kmcnt 0x0 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -75,6 +84,13 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voff ; GFX1200-NEXT: s_wait_kmcnt 0x0 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: 
s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0) ret void } @@ -114,6 +130,14 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX1200-NEXT: s_wait_kmcnt 0x0 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) ret void } @@ -153,6 +177,14 @@ define void @struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr ; GFX1200-NEXT: s_wait_kmcnt 0x0 ; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v[1:2], s[0:3], s16 idxen offen ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[2:3], s[0:3], s16 idxen offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -291,6 +323,42 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__vgpr_rsrc__vgpr_v ; GFX1200-NEXT: ; %bb.2: ; GFX1200-NEXT: s_mov_b32 exec_lo, s2 ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[8:9], s[4:7], s3 idxen offen +; 
GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr7 +; GFX1250-NEXT: ; implicit-def: $vgpr0 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB4_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -429,6 +497,42 @@ define void @struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__vgpr_rsrc__vgpr ; GFX1200-NEXT: ; %bb.2: ; GFX1200-NEXT: s_mov_b32 exec_lo, s2 ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[8:9], s[4:7], s3 idxen offen +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr7 +; GFX1250-NEXT: ; implicit-def: $vgpr0 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB5_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll index 71c63bf..0191a85 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define float 
@struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) #0 { ; GFX90A-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: @@ -32,6 +33,15 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_vo ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN ; GFX1200-NEXT: s_wait_loadcnt 0x0 ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %ret } @@ -62,6 +72,14 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffs ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN ; GFX1200-NEXT: s_wait_loadcnt 0x0 ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0) ret float %ret } @@ -95,6 +113,15 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_vo ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN ; GFX1200-NEXT: s_wait_loadcnt 0x0 ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) ret float %ret } @@ -128,6 +155,15 @@ define <2 x half> @struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__ ; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN ; GFX1200-NEXT: s_wait_loadcnt 0x0 ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> 
@llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <2 x half> %ret } @@ -237,6 +273,43 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__vgpr_rsrc__vgpr_vo ; GFX1200-NEXT: s_mov_b32 exec_lo, s2 ; GFX1200-NEXT: s_wait_loadcnt 0x0 ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr7 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB4_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %ret } @@ -346,6 +419,43 @@ define <2 x half> @struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__vgpr_rsrc__ ; GFX1200-NEXT: s_mov_b32 exec_lo, s2 ; GFX1200-NEXT: s_wait_loadcnt 0x0 ; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] 
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr7 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB5_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <2 x half> %ret } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll index e3889ab..d551d91 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll @@ -4,7 +4,8 @@ ; Not supported in gfx8 or gfx9 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { ; GFX6-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: @@ -35,16 +36,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; 
GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %ret } @@ -78,16 +88,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[4:5], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret float %ret @@ -122,16 +141,24 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffs ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; 
GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0) ret float %ret } @@ -165,16 +192,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) ret float %ret } @@ -206,15 +242,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float 
@llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -246,15 +290,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[4:5], s[0:3], s16 idxen offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -288,15 +340,22 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voff ; GFX11-NEXT: buffer_atomic_max_f32 v0, v1, s[0:3], s16 idxen ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0) ret void } @@ -328,15 +387,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen slc ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: 
struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) ret void } @@ -442,36 +509,68 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_vo ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_mov_b32 s2, exec_lo -; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: v_readfirstlane_b32 s4, v1 -; GFX12-NEXT: v_readfirstlane_b32 s5, v2 -; GFX12-NEXT: v_readfirstlane_b32 s6, v3 -; GFX12-NEXT: v_readfirstlane_b32 s7, v4 -; GFX12-NEXT: s_wait_alu 0xf1ff -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] -; GFX12-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4] -; GFX12-NEXT: s_and_b32 s1, vcc_lo, s1 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_saveexec_b32 s1, s1 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN -; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 -; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6 -; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s1 -; GFX12-NEXT: s_cbranch_execnz .LBB8_1 -; GFX12-NEXT: ; %bb.2: -; GFX12-NEXT: s_mov_b32 exec_lo, s2 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: s_mov_b32 s2, exec_lo +; GFX1200-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1 +; GFX1200-NEXT: v_readfirstlane_b32 s4, v1 +; GFX1200-NEXT: v_readfirstlane_b32 s5, v2 +; GFX1200-NEXT: v_readfirstlane_b32 s6, v3 +; GFX1200-NEXT: v_readfirstlane_b32 
s7, v4 +; GFX1200-NEXT: s_wait_alu 0xf1ff +; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] +; GFX1200-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4] +; GFX1200-NEXT: s_and_b32 s1, vcc_lo, s1 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_saveexec_b32 s1, s1 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 +; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6 +; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s1 +; GFX1200-NEXT: s_cbranch_execnz .LBB8_1 +; GFX1200-NEXT: ; %bb.2: +; GFX1200-NEXT: s_mov_b32 exec_lo, s2 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4 +; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2 +; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[4:5] +; GFX1250-NEXT: s_and_b32 s1, vcc_lo, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s1, s1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[8:9], s[4:7], s0 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s1 +; GFX1250-NEXT: s_cbranch_execnz .LBB8_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret float %ret @@ -595,41 +694,78 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_mov_b32 s2, exec_lo -; GFX12-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: v_readfirstlane_b32 s4, v1 -; GFX12-NEXT: v_readfirstlane_b32 s5, v2 -; GFX12-NEXT: v_readfirstlane_b32 s6, v3 -; GFX12-NEXT: v_readfirstlane_b32 s7, v4 -; GFX12-NEXT: v_readfirstlane_b32 s3, v7 -; GFX12-NEXT: s_wait_alu 0xf1ff -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] -; GFX12-NEXT: s_delay_alu 
instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] -; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 -; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_b32 s0, s0, s1 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_saveexec_b32 s0, s0 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN -; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 -; GFX12-NEXT: ; implicit-def: $vgpr7 -; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6 -; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 -; GFX12-NEXT: s_cbranch_execnz .LBB9_1 -; GFX12-NEXT: ; %bb.2: -; GFX12-NEXT: s_mov_b32 exec_lo, s2 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: s_mov_b32 s2, exec_lo +; GFX1200-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 +; GFX1200-NEXT: v_readfirstlane_b32 s4, v1 +; GFX1200-NEXT: v_readfirstlane_b32 s5, v2 +; GFX1200-NEXT: v_readfirstlane_b32 s6, v3 +; GFX1200-NEXT: v_readfirstlane_b32 s7, v4 +; GFX1200-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1200-NEXT: s_wait_alu 0xf1ff +; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] +; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] +; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_b32 s0, s0, s1 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 +; GFX1200-NEXT: ; implicit-def: $vgpr7 +; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6 +; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1200-NEXT: s_cbranch_execnz .LBB9_1 +; GFX1200-NEXT: ; %bb.2: +; GFX1200-NEXT: s_mov_b32 exec_lo, s2 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4 +; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2 +; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | 
instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr7 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB9_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret float %ret diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll index f001bf9..0096289 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll @@ -4,7 +4,8 @@ ; Not supported in gfx8 or gfx9 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { ; GFX6-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: @@ -35,16 +36,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; 
GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %ret } @@ -78,16 +88,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[4:5], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret float %ret @@ -122,16 +141,24 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voff ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN +; 
GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0) ret float %ret } @@ -165,16 +192,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) ret float %ret } @@ -206,15 +242,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_ ; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, 
i32 %soffset, i32 0) ret void } @@ -246,15 +290,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_ ; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[4:5], s[0:3], s16 idxen offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -288,15 +340,22 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_vof ; GFX11-NEXT: buffer_atomic_min_f32 v0, v1, s[0:3], s16 idxen ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0) ret void } @@ -328,15 +387,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_ ; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen slc ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; GFX12: ; 
%bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) ret void } @@ -442,36 +509,68 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_v ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_mov_b32 s2, exec_lo -; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: v_readfirstlane_b32 s4, v1 -; GFX12-NEXT: v_readfirstlane_b32 s5, v2 -; GFX12-NEXT: v_readfirstlane_b32 s6, v3 -; GFX12-NEXT: v_readfirstlane_b32 s7, v4 -; GFX12-NEXT: s_wait_alu 0xf1ff -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] -; GFX12-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4] -; GFX12-NEXT: s_and_b32 s1, vcc_lo, s1 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_saveexec_b32 s1, s1 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN -; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 -; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6 -; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s1 -; GFX12-NEXT: s_cbranch_execnz .LBB8_1 -; GFX12-NEXT: ; %bb.2: -; GFX12-NEXT: s_mov_b32 exec_lo, s2 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: s_mov_b32 s2, exec_lo +; GFX1200-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1 +; GFX1200-NEXT: v_readfirstlane_b32 s4, v1 +; GFX1200-NEXT: v_readfirstlane_b32 s5, v2 +; GFX1200-NEXT: v_readfirstlane_b32 s6, v3 +; GFX1200-NEXT: v_readfirstlane_b32 s7, v4 +; GFX1200-NEXT: s_wait_alu 0xf1ff +; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | 
instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] +; GFX1200-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4] +; GFX1200-NEXT: s_and_b32 s1, vcc_lo, s1 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_saveexec_b32 s1, s1 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 +; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6 +; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s1 +; GFX1200-NEXT: s_cbranch_execnz .LBB8_1 +; GFX1200-NEXT: ; %bb.2: +; GFX1200-NEXT: s_mov_b32 exec_lo, s2 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4 +; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2 +; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[4:5] +; GFX1250-NEXT: s_and_b32 s1, vcc_lo, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s1, s1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[8:9], s[4:7], s0 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s1 +; GFX1250-NEXT: s_cbranch_execnz .LBB8_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret float %ret @@ -595,41 +694,78 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_mov_b32 s2, exec_lo -; GFX12-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: v_readfirstlane_b32 s4, v1 -; GFX12-NEXT: v_readfirstlane_b32 s5, v2 -; GFX12-NEXT: v_readfirstlane_b32 s6, v3 -; GFX12-NEXT: v_readfirstlane_b32 s7, v4 -; GFX12-NEXT: v_readfirstlane_b32 s3, v7 -; GFX12-NEXT: s_wait_alu 0xf1ff -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX12-NEXT: v_cmp_eq_u64_e64 s0, 
s[6:7], v[3:4] -; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 -; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_b32 s0, s0, s1 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_saveexec_b32 s0, s0 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN -; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 -; GFX12-NEXT: ; implicit-def: $vgpr7 -; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6 -; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 -; GFX12-NEXT: s_cbranch_execnz .LBB9_1 -; GFX12-NEXT: ; %bb.2: -; GFX12-NEXT: s_mov_b32 exec_lo, s2 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: s_mov_b32 s2, exec_lo +; GFX1200-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 +; GFX1200-NEXT: v_readfirstlane_b32 s4, v1 +; GFX1200-NEXT: v_readfirstlane_b32 s5, v2 +; GFX1200-NEXT: v_readfirstlane_b32 s6, v3 +; GFX1200-NEXT: v_readfirstlane_b32 s7, v4 +; GFX1200-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1200-NEXT: s_wait_alu 0xf1ff +; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] +; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] +; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_b32 s0, s0, s1 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 +; GFX1200-NEXT: ; implicit-def: $vgpr7 +; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6 +; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1200-NEXT: s_cbranch_execnz .LBB9_1 +; GFX1200-NEXT: ; %bb.2: +; GFX1200-NEXT: s_mov_b32 exec_lo, s2 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4 +; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2 +; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1250-NEXT: s_and_b32 s0, 
vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr7 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB9_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret float %ret diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll index 91a8446..13ea8b0 100644 --- a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll +++ b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll @@ -18,10 +18,9 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a ; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x0 ; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x8 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_or_b32 s1, s2, s3 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: s_or_b32 s0, s0, s1 -; GFX11-NEXT: v_mov_b32_e32 v2, s0 +; GFX11-NEXT: v_mov_b32_e32 v2, s3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_or3_b32 v2, s2, v2, s0 ; GFX11-NEXT: global_store_b32 v[0:1], v2, off ; GFX11-NEXT: s_endpgm ; @@ -34,14 +33,12 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo ; GFX12-NEXT: v_readfirstlane_b32 s0, v2 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_readfirstlane_b32 s1, v3 ; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_or_b32 s0, s0, s1 -; GFX12-NEXT: s_or_b32 s0, s2, s0 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX12-NEXT: v_mov_b32_e32 v2, s0 +; GFX12-NEXT: v_or3_b32 v2, v2, s1, s2 ; GFX12-NEXT: global_store_b32 v[0:1], v2, off ; GFX12-NEXT: s_endpgm bb: diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll index a3ebaec..5f0ca7b 100644 --- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll +++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll @@ -74,7 +74,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp(ptr addrspace(1) %out) { ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_movk_i32 s0, 0x5000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x2000 +; FLATSCR-NEXT: s_addk_i32 s0, 0x3000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[0:1], off, s0 offset:208 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_movk_i32 s0, 0x3000 @@ -175,7 +176,9 @@ define void @func_local_stack_offset_uses_sp(ptr addrspace(1) %out) { ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; 
FLATSCR-NEXT: s_cbranch_scc1 .LBB1_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_add_i32 s0, s33, 0x5000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x2000 +; FLATSCR-NEXT: s_add_i32 s1, s33, s0 +; FLATSCR-NEXT: s_add_i32 s0, s1, 0x3000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[2:3], off, s0 offset:208 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_add_i32 s0, s33, 0x3000 @@ -223,30 +226,35 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: s_cbranch_scc1 .LBB2_1 ; MUBUF-NEXT: ; %bb.2: ; %split +; MUBUF-NEXT: s_movk_i32 s5, 0x12d4 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d4, v1 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 +; MUBUF-NEXT: s_movk_i32 s5, 0x12d0 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 ; MUBUF-NEXT: s_movk_i32 s4, 0x4000 ; MUBUF-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d0, v1 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 +; MUBUF-NEXT: s_movk_i32 s5, 0x12c4 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 ; MUBUF-NEXT: s_or_b32 s4, s4, 0x12c0 ; MUBUF-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12c4, v1 -; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 ; MUBUF-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: v_mov_b32_e32 v0, s4 -; MUBUF-NEXT: v_or_b32_e32 v2, 0x12cc, v3 +; MUBUF-NEXT: s_movk_i32 s4, 0x12cc +; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000 +; MUBUF-NEXT: v_or_b32_e32 v2, s4, v3 +; MUBUF-NEXT: s_movk_i32 s4, 0x12c8 ; MUBUF-NEXT: v_mov_b32_e32 v6, 0x4000 ; MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: v_mov_b32_e32 v7, 0x4000 ; MUBUF-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v2, 0x12c8, v6 +; MUBUF-NEXT: v_or_b32_e32 v2, s4, v6 ; MUBUF-NEXT: v_mov_b32_e32 v8, 0x4000 ; MUBUF-NEXT: v_mov_b32_e32 v9, 0x4000 ; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen glc @@ -298,7 +306,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_cbranch_scc1 .LBB2_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_movk_i32 s0, 0x3000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x1000 +; FLATSCR-NEXT: s_addk_i32 s0, 0x2000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[8:9], off, s0 offset:720 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 offset:704 glc diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll index 63c0463..66de953 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll @@ -255,6 +255,56 @@ define i32 @ptrtoint_offset(ptr addrspace(7) %ptr) { ret i32 %ret } +define i32 @ptrtoaddr(ptr addrspace(7) %ptr) { +; CHECK-LABEL: define i32 @ptrtoaddr +; CHECK-SAME: ({ ptr addrspace(8), i32 } [[PTR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 0 +; CHECK-NEXT: [[RET:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1 +; CHECK-NEXT: ret i32 [[RET]] +; + %ret = ptrtoaddr ptr addrspace(7) %ptr to i32 + ret i32 %ret +} + +define <2 x i32> @ptrtoaddr_vec(<2 x ptr addrspace(7)> %ptr) 
{ +; CHECK-LABEL: define <2 x i32> @ptrtoaddr_vec +; CHECK-SAME: ({ <2 x ptr addrspace(8)>, <2 x i32> } [[PTR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { <2 x ptr addrspace(8)>, <2 x i32> } [[PTR]], 0 +; CHECK-NEXT: [[RET:%.*]] = extractvalue { <2 x ptr addrspace(8)>, <2 x i32> } [[PTR]], 1 +; CHECK-NEXT: ret <2 x i32> [[RET]] +; + %ret = ptrtoaddr <2 x ptr addrspace(7)> %ptr to <2 x i32> + ret <2 x i32> %ret +} + +;; Check that we extend the offset to i160. +define i160 @ptrtoaddr_ext(ptr addrspace(7) %ptr) { +; CHECK-LABEL: define i160 @ptrtoaddr_ext +; CHECK-SAME: ({ ptr addrspace(8), i32 } [[PTR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 0 +; CHECK-NEXT: [[PTR_OFF:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1 +; CHECK-NEXT: [[RET:%.*]] = zext i32 [[PTR_OFF]] to i160 +; CHECK-NEXT: ret i160 [[RET]] +; + %addr = ptrtoaddr ptr addrspace(7) %ptr to i32 + %ext = zext i32 %addr to i160 + ret i160 %ext +} + +;; Check that we truncate the offset to i16. +define i16 @ptrtoaddr_trunc(ptr addrspace(7) %ptr) { +; CHECK-LABEL: define i16 @ptrtoaddr_trunc +; CHECK-SAME: ({ ptr addrspace(8), i32 } [[PTR:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 0 +; CHECK-NEXT: [[PTR_OFF:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1 +; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[PTR_OFF]] to i16 +; CHECK-NEXT: ret i16 [[RET]] +; + %addr = ptrtoaddr ptr addrspace(7) %ptr to i32 + %trunc = trunc i32 %addr to i16 + ret i16 %trunc +} + define ptr addrspace(7) @inttoptr(i160 %v) { ; CHECK-LABEL: define { ptr addrspace(8), i32 } @inttoptr ; CHECK-SAME: (i160 [[V:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir index 23412aa..3b3ea3f 100644 --- a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir +++ b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir @@ -347,8 +347,10 @@ body: | ... # User-requested maximum number of VGPRs need to be taken into account by # the scheduler's rematerialization stage. Register usage above that number -# is considered like spill; occupancy is "inadvertently" increased when -# eliminating spill. +# is considered like spill. On unified RF (gfx90a), the requested number is +# understood "per-bank", effectively doubling its value, so no rematerialization +# is necessary. 
+--- name: small_num_vgprs_as_spill tracksRegLiveness: true machineFunctionInfo: @@ -371,36 +373,15 @@ body: | ; GFX908-NEXT: [[V_CVT_I32_F64_e32_10:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_11:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_12:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_16:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_17:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_18:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_19:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit 
[[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]] - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_33:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_31]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_33]], implicit [[V_CVT_I32_F64_e32_27]] + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_14]], implicit [[V_CVT_I32_F64_e32_15]] + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_13]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: small_num_vgprs_as_spill @@ -420,36 +401,15 @@ body: | ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_10:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_11:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_12:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_16:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_17:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_18:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_19:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: 
[[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_13:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_14:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_15:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.1: ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]] ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]] ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]] - ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit [[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]] - ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]] - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode - ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]] - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_33:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode - ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_31]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_33]], implicit [[V_CVT_I32_F64_e32_27]] + ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]] ; GFX90A-NEXT: S_ENDPGM 0 
bb.0: successors: %bb.1 @@ -467,38 +427,16 @@ body: | %10:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 10, implicit $exec, implicit $mode, implicit-def $m0 %11:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 11, implicit $exec, implicit $mode, implicit-def $m0 %12:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 12, implicit $exec, implicit $mode, implicit-def $m0 - %13:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode, implicit-def $m0 - %14:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode, implicit-def $m0 - %15:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode, implicit-def $m0 - %16:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 16, implicit $exec, implicit $mode, implicit-def $m0 - %17:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 17, implicit $exec, implicit $mode, implicit-def $m0 - %18:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 18, implicit $exec, implicit $mode, implicit-def $m0 - %19:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 19, implicit $exec, implicit $mode, implicit-def $m0 - %20:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 - %21:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 - %22:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 - %23:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode, implicit-def $m0 - %24:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0 - %25:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0 - %26:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0 - %27:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode - %28:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode - %29:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode - %30:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode - %31:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode - %32:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 32, implicit $exec, implicit $mode - %33:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 33, implicit $exec, implicit $mode + %13:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 13, implicit $exec, implicit $mode + %14:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 14, implicit $exec, implicit $mode + %15:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 15, implicit $exec, implicit $mode bb.1: S_NOP 0, implicit %0, implicit %1, implicit %2, implicit %3, implicit %4 S_NOP 0, implicit %5, implicit %6, implicit %7, implicit %8, implicit %9 S_NOP 0, implicit %10, implicit %11, implicit %12, implicit %13, implicit %14 - S_NOP 0, implicit %15, implicit %16, implicit %17, implicit %18, implicit %19 - S_NOP 0, implicit %20, implicit %21, implicit %22, implicit %23, implicit %24 - S_NOP 0, implicit %25, implicit %26, implicit %27, implicit %28, implicit %29 - S_NOP 0, implicit %30, implicit %31, implicit %32, implicit %33 - + S_NOP 0, implicit %15 S_ENDPGM 0 ... # Min/Max occupancy is 8, but user requests 7, the scheduler's rematerialization @@ -815,9 +753,9 @@ body: | S_ENDPGM 0 ... # Min/Max waves/EU is 8. For targets with non-unified RF (gfx908) we are able to -# eliminate both ArchVGPR and AGPR spilling by saving 2 VGPRs. In the unified RF -# case (gfx90a) the ArchVGPR allocation granule forces us to remat more -# ArchVGPRs to eliminate spilling. +# eliminate both ArchVGPR and AGPR spilling by saving one of each. 
In the +# unified RF case (gfx90a) the ArchVGPR allocation granule may force us to remat +# more ArchVGPRs to eliminate spilling. --- name: reduce_arch_and_acc_vgrp_spill tracksRegLiveness: true @@ -860,6 +798,7 @@ body: | ; GFX908-NEXT: [[DEF28:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[DEF29:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[DEF30:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX908-NEXT: [[DEF31:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 1, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_2:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 2, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_3:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 3, implicit $exec, implicit $mode, implicit-def $m0 @@ -886,12 +825,11 @@ body: | ; GFX908-NEXT: [[V_CVT_I32_F64_e32_24:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 24, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_25:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 25, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_26:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 26, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 64, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_27:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 28, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_28:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 29, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_29:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 30, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_30:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 31, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_31:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 64, implicit $exec, implicit $mode ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]] @@ -899,17 +837,17 @@ body: | ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_10]], implicit [[V_CVT_I32_F64_e32_11]], implicit [[V_CVT_I32_F64_e32_12]], implicit [[V_CVT_I32_F64_e32_13]], implicit [[V_CVT_I32_F64_e32_14]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_15]], implicit [[V_CVT_I32_F64_e32_16]], implicit [[V_CVT_I32_F64_e32_17]], implicit [[V_CVT_I32_F64_e32_18]], implicit [[V_CVT_I32_F64_e32_19]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_20]], implicit [[V_CVT_I32_F64_e32_21]], implicit [[V_CVT_I32_F64_e32_22]], implicit [[V_CVT_I32_F64_e32_23]], implicit [[V_CVT_I32_F64_e32_24]] - ; GFX908-NEXT: 
S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_27]], implicit [[V_CVT_I32_F64_e32_28]], implicit [[V_CVT_I32_F64_e32_29]] - ; GFX908-NEXT: [[DEF31:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_32:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 27, implicit $exec, implicit $mode + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_25]], implicit [[V_CVT_I32_F64_e32_26]], implicit [[V_CVT_I32_F64_e32_32]], implicit [[V_CVT_I32_F64_e32_27]], implicit [[V_CVT_I32_F64_e32_28]] ; GFX908-NEXT: [[DEF32:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_30]], implicit [[V_CVT_I32_F64_e32_31]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[V_CVT_I32_F64_e32_32]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF30]] + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_29]], implicit [[V_CVT_I32_F64_e32_30]], implicit [[DEF32]], implicit [[DEF]], implicit [[DEF1]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]], implicit [[V_CVT_I32_F64_e32_31]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF31]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: reduce_arch_and_acc_vgrp_spill @@ -1358,8 +1296,7 @@ body: | ; GFX908-NEXT: [[V_CVT_I32_F64_e32_252:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 252, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_253:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 253, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[V_CVT_I32_F64_e32_254:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 254, implicit $exec, implicit $mode, implicit-def $m0 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit 
[[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]], implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]] @@ -1387,7 +1324,8 @@ body: | ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_220]], implicit [[V_CVT_I32_F64_e32_221]], implicit [[V_CVT_I32_F64_e32_222]], implicit [[V_CVT_I32_F64_e32_223]], implicit [[V_CVT_I32_F64_e32_224]], implicit [[V_CVT_I32_F64_e32_225]], implicit [[V_CVT_I32_F64_e32_226]], implicit [[V_CVT_I32_F64_e32_227]], implicit [[V_CVT_I32_F64_e32_228]], implicit [[V_CVT_I32_F64_e32_229]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_230]], implicit [[V_CVT_I32_F64_e32_231]], implicit [[V_CVT_I32_F64_e32_232]], implicit [[V_CVT_I32_F64_e32_233]], implicit [[V_CVT_I32_F64_e32_234]], implicit [[V_CVT_I32_F64_e32_235]], implicit [[V_CVT_I32_F64_e32_236]], implicit [[V_CVT_I32_F64_e32_237]], implicit [[V_CVT_I32_F64_e32_238]], implicit [[V_CVT_I32_F64_e32_239]] ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_240]], implicit [[V_CVT_I32_F64_e32_241]], implicit [[V_CVT_I32_F64_e32_242]], implicit [[V_CVT_I32_F64_e32_243]], implicit [[V_CVT_I32_F64_e32_244]], implicit [[V_CVT_I32_F64_e32_245]], implicit [[V_CVT_I32_F64_e32_246]], implicit [[V_CVT_I32_F64_e32_247]], implicit [[V_CVT_I32_F64_e32_248]], implicit [[V_CVT_I32_F64_e32_249]] - ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[DEF]] + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode + ; GFX908-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[DEF]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: reduce_spill_archvgpr_above_addressable_limit @@ -1395,6 +1333,7 @@ body: | ; GFX90A-NEXT: successors: %bb.1(0x80000000) ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 0, implicit $exec, implicit $mode, implicit-def $m0 + ; GFX90A-NEXT: [[DEF:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 1, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_2:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 2, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_3:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 3, implicit $exec, implicit $mode, implicit-def $m0 @@ -1650,8 +1589,6 @@ body: | ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_253:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 253, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_254:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 254, implicit $exec, implicit $mode, implicit-def $m0 ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_255:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 256, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[DEF:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = 
nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.1: ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]], implicit [[V_CVT_I32_F64_e32_2]], implicit [[V_CVT_I32_F64_e32_3]], implicit [[V_CVT_I32_F64_e32_4]], implicit [[V_CVT_I32_F64_e32_5]], implicit [[V_CVT_I32_F64_e32_6]], implicit [[V_CVT_I32_F64_e32_7]], implicit [[V_CVT_I32_F64_e32_8]], implicit [[V_CVT_I32_F64_e32_9]] @@ -1679,6 +1616,7 @@ body: | ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_220]], implicit [[V_CVT_I32_F64_e32_221]], implicit [[V_CVT_I32_F64_e32_222]], implicit [[V_CVT_I32_F64_e32_223]], implicit [[V_CVT_I32_F64_e32_224]], implicit [[V_CVT_I32_F64_e32_225]], implicit [[V_CVT_I32_F64_e32_226]], implicit [[V_CVT_I32_F64_e32_227]], implicit [[V_CVT_I32_F64_e32_228]], implicit [[V_CVT_I32_F64_e32_229]] ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_230]], implicit [[V_CVT_I32_F64_e32_231]], implicit [[V_CVT_I32_F64_e32_232]], implicit [[V_CVT_I32_F64_e32_233]], implicit [[V_CVT_I32_F64_e32_234]], implicit [[V_CVT_I32_F64_e32_235]], implicit [[V_CVT_I32_F64_e32_236]], implicit [[V_CVT_I32_F64_e32_237]], implicit [[V_CVT_I32_F64_e32_238]], implicit [[V_CVT_I32_F64_e32_239]] ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_240]], implicit [[V_CVT_I32_F64_e32_241]], implicit [[V_CVT_I32_F64_e32_242]], implicit [[V_CVT_I32_F64_e32_243]], implicit [[V_CVT_I32_F64_e32_244]], implicit [[V_CVT_I32_F64_e32_245]], implicit [[V_CVT_I32_F64_e32_246]], implicit [[V_CVT_I32_F64_e32_247]], implicit [[V_CVT_I32_F64_e32_248]], implicit [[V_CVT_I32_F64_e32_249]] + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_256:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 255, implicit $exec, implicit $mode ; GFX90A-NEXT: S_NOP 0, implicit [[V_CVT_I32_F64_e32_250]], implicit [[V_CVT_I32_F64_e32_251]], implicit [[V_CVT_I32_F64_e32_252]], implicit [[V_CVT_I32_F64_e32_253]], implicit [[V_CVT_I32_F64_e32_254]], implicit [[V_CVT_I32_F64_e32_256]], implicit [[V_CVT_I32_F64_e32_255]], implicit [[DEF]] ; GFX90A-NEXT: S_ENDPGM 0 bb.0: @@ -2246,35 +2184,35 @@ body: | ; GFX908-NEXT: [[DEF253:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[DEF254:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: [[DEF255:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX908-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: - ; GFX908-NEXT: S_NOP 0, implicit [[DEF128]], implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF138]], implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF148]], implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF158]], implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF168]], implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit 
[[DEF176]], implicit [[DEF177]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF178]], implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF188]], implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF198]], implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF208]], implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF218]], implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF228]], implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF238]], implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF248]], implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[DEF256]], implicit [[DEF]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]], implicit [[DEF39]], implicit [[DEF40]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]], implicit [[DEF49]], implicit [[DEF50]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]], implicit [[DEF59]], implicit [[DEF60]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]], implicit [[DEF69]], implicit [[DEF70]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF71]], implicit [[DEF72]], implicit 
[[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]], implicit [[DEF79]], implicit [[DEF80]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]], implicit [[DEF89]], implicit [[DEF90]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]], implicit [[DEF99]], implicit [[DEF100]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]], implicit [[DEF109]], implicit [[DEF110]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]], implicit [[DEF119]], implicit [[DEF120]] - ; GFX908-NEXT: S_NOP 0, implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]] + ; GFX908-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX908-NEXT: S_NOP 0, implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF89]], implicit [[DEF90]], implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], 
implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit 
[[DEF247]], implicit [[DEF248]] + ; GFX908-NEXT: S_NOP 0, implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: reduce_spill_agpr_above_addressable_limit @@ -2533,41 +2471,41 @@ body: | ; GFX90A-NEXT: [[DEF249:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF250:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF251:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 257, implicit $exec, implicit $mode - ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 258, implicit $exec, implicit $mode ; GFX90A-NEXT: [[DEF252:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF253:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF254:%[0-9]+]]:agpr_32 = IMPLICIT_DEF ; GFX90A-NEXT: [[DEF255:%[0-9]+]]:agpr_32 = IMPLICIT_DEF - ; GFX90A-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 257, implicit $exec, implicit $mode + ; GFX90A-NEXT: [[V_CVT_I32_F64_e32_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 258, implicit $exec, implicit $mode ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.1: - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]], implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]], implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]], implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]], implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]], implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]], implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]], implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]], implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]], implicit [[DEF89]], implicit [[DEF90]], 
implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]], implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]], implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]], implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]], implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]], implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]], implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]], implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]], implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]], implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]], implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]], implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]], implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]], implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]], implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]], implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit 
[[DEF242]], implicit [[DEF243]], implicit [[DEF244]] - ; GFX90A-NEXT: S_NOP 0, implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]], implicit [[DEF248]], implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]] + ; GFX90A-NEXT: [[DEF256:%[0-9]+]]:agpr_32 = IMPLICIT_DEF + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF256]], implicit [[DEF]], implicit [[DEF1]], implicit [[DEF2]], implicit [[DEF3]], implicit [[DEF4]], implicit [[DEF5]], implicit [[DEF6]], implicit [[DEF7]], implicit [[DEF8]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF9]], implicit [[DEF10]], implicit [[DEF11]], implicit [[DEF12]], implicit [[DEF13]], implicit [[DEF14]], implicit [[DEF15]], implicit [[DEF16]], implicit [[DEF17]], implicit [[DEF18]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF19]], implicit [[DEF20]], implicit [[DEF21]], implicit [[DEF22]], implicit [[DEF23]], implicit [[DEF24]], implicit [[DEF25]], implicit [[DEF26]], implicit [[DEF27]], implicit [[DEF28]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF29]], implicit [[DEF30]], implicit [[DEF31]], implicit [[DEF32]], implicit [[DEF33]], implicit [[DEF34]], implicit [[DEF35]], implicit [[DEF36]], implicit [[DEF37]], implicit [[DEF38]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF39]], implicit [[DEF40]], implicit [[DEF41]], implicit [[DEF42]], implicit [[DEF43]], implicit [[DEF44]], implicit [[DEF45]], implicit [[DEF46]], implicit [[DEF47]], implicit [[DEF48]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF49]], implicit [[DEF50]], implicit [[DEF51]], implicit [[DEF52]], implicit [[DEF53]], implicit [[DEF54]], implicit [[DEF55]], implicit [[DEF56]], implicit [[DEF57]], implicit [[DEF58]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF59]], implicit [[DEF60]], implicit [[DEF61]], implicit [[DEF62]], implicit [[DEF63]], implicit [[DEF64]], implicit [[DEF65]], implicit [[DEF66]], implicit [[DEF67]], implicit [[DEF68]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF69]], implicit [[DEF70]], implicit [[DEF71]], implicit [[DEF72]], implicit [[DEF73]], implicit [[DEF74]], implicit [[DEF75]], implicit [[DEF76]], implicit [[DEF77]], implicit [[DEF78]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF79]], implicit [[DEF80]], implicit [[DEF81]], implicit [[DEF82]], implicit [[DEF83]], implicit [[DEF84]], implicit [[DEF85]], implicit [[DEF86]], implicit [[DEF87]], implicit [[DEF88]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF89]], implicit [[DEF90]], implicit [[DEF91]], implicit [[DEF92]], implicit [[DEF93]], implicit [[DEF94]], implicit [[DEF95]], implicit [[DEF96]], implicit [[DEF97]], implicit [[DEF98]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF99]], implicit [[DEF100]], implicit [[DEF101]], implicit [[DEF102]], implicit [[DEF103]], implicit [[DEF104]], implicit [[DEF105]], implicit [[DEF106]], implicit [[DEF107]], implicit [[DEF108]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF109]], implicit [[DEF110]], implicit [[DEF111]], implicit [[DEF112]], implicit [[DEF113]], implicit [[DEF114]], implicit [[DEF115]], implicit [[DEF116]], implicit [[DEF117]], implicit [[DEF118]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF119]], implicit [[DEF120]], implicit [[DEF121]], implicit [[DEF122]], implicit [[DEF123]], implicit [[DEF124]], implicit [[DEF125]], implicit [[DEF126]], implicit [[DEF127]], implicit [[DEF128]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF129]], implicit [[DEF130]], implicit [[DEF131]], implicit [[DEF132]], implicit [[DEF133]], implicit [[DEF134]], implicit [[DEF135]], implicit [[DEF136]], implicit [[DEF137]], implicit [[DEF138]] + ; GFX90A-NEXT: S_NOP 
0, implicit [[DEF139]], implicit [[DEF140]], implicit [[DEF141]], implicit [[DEF142]], implicit [[DEF143]], implicit [[DEF144]], implicit [[DEF145]], implicit [[DEF146]], implicit [[DEF147]], implicit [[DEF148]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF149]], implicit [[DEF150]], implicit [[DEF151]], implicit [[DEF152]], implicit [[DEF153]], implicit [[DEF154]], implicit [[DEF155]], implicit [[DEF156]], implicit [[DEF157]], implicit [[DEF158]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF159]], implicit [[DEF160]], implicit [[DEF161]], implicit [[DEF162]], implicit [[DEF163]], implicit [[DEF164]], implicit [[DEF165]], implicit [[DEF166]], implicit [[DEF167]], implicit [[DEF168]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF169]], implicit [[DEF170]], implicit [[DEF171]], implicit [[DEF172]], implicit [[DEF173]], implicit [[DEF174]], implicit [[DEF175]], implicit [[DEF176]], implicit [[DEF177]], implicit [[DEF178]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF179]], implicit [[DEF180]], implicit [[DEF181]], implicit [[DEF182]], implicit [[DEF183]], implicit [[DEF184]], implicit [[DEF185]], implicit [[DEF186]], implicit [[DEF187]], implicit [[DEF188]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF189]], implicit [[DEF190]], implicit [[DEF191]], implicit [[DEF192]], implicit [[DEF193]], implicit [[DEF194]], implicit [[DEF195]], implicit [[DEF196]], implicit [[DEF197]], implicit [[DEF198]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF199]], implicit [[DEF200]], implicit [[DEF201]], implicit [[DEF202]], implicit [[DEF203]], implicit [[DEF204]], implicit [[DEF205]], implicit [[DEF206]], implicit [[DEF207]], implicit [[DEF208]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF209]], implicit [[DEF210]], implicit [[DEF211]], implicit [[DEF212]], implicit [[DEF213]], implicit [[DEF214]], implicit [[DEF215]], implicit [[DEF216]], implicit [[DEF217]], implicit [[DEF218]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF219]], implicit [[DEF220]], implicit [[DEF221]], implicit [[DEF222]], implicit [[DEF223]], implicit [[DEF224]], implicit [[DEF225]], implicit [[DEF226]], implicit [[DEF227]], implicit [[DEF228]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF229]], implicit [[DEF230]], implicit [[DEF231]], implicit [[DEF232]], implicit [[DEF233]], implicit [[DEF234]], implicit [[DEF235]], implicit [[DEF236]], implicit [[DEF237]], implicit [[DEF238]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF239]], implicit [[DEF240]], implicit [[DEF241]], implicit [[DEF242]], implicit [[DEF243]], implicit [[DEF244]], implicit [[DEF245]], implicit [[DEF246]], implicit [[DEF247]], implicit [[DEF248]] + ; GFX90A-NEXT: S_NOP 0, implicit [[DEF249]], implicit [[DEF250]], implicit [[DEF251]], implicit [[DEF252]], implicit [[DEF253]], implicit [[DEF254]], implicit [[DEF255]], implicit [[V_CVT_I32_F64_e32_]], implicit [[V_CVT_I32_F64_e32_1]] ; GFX90A-NEXT: S_ENDPGM 0 bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir index f69337e..06d8474 100644 --- a/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir +++ b/llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats.mir @@ -2104,13 +2104,9 @@ body: | ; GFX908-NEXT: [[S_MOV_B32_58:%[0-9]+]]:sgpr_32 = S_MOV_B32 69 ; GFX908-NEXT: [[S_MOV_B32_59:%[0-9]+]]:sgpr_32 = S_MOV_B32 70 ; GFX908-NEXT: [[S_MOV_B32_60:%[0-9]+]]:sgpr_32 = S_MOV_B32 71 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: 
[[S_MOV_B32_61:%[0-9]+]]:sgpr_32 = S_MOV_B32 72 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[S_MOV_B32_62:%[0-9]+]]:sgpr_32 = S_MOV_B32 73 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[S_MOV_B32_63:%[0-9]+]]:sgpr_32 = S_MOV_B32 74 - ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode ; GFX908-NEXT: [[S_MOV_B32_64:%[0-9]+]]:sgpr_32 = S_MOV_B32 75 ; GFX908-NEXT: [[S_MOV_B32_65:%[0-9]+]]:sgpr_32 = S_MOV_B32 76 ; GFX908-NEXT: [[S_MOV_B32_66:%[0-9]+]]:sgpr_32 = S_MOV_B32 77 @@ -2120,7 +2116,11 @@ body: | ; GFX908-NEXT: [[S_MOV_B32_70:%[0-9]+]]:sgpr_32 = S_MOV_B32 81 ; GFX908-NEXT: [[S_MOV_B32_71:%[0-9]+]]:sgpr_32 = S_MOV_B32 82 ; GFX908-NEXT: [[S_MOV_B32_72:%[0-9]+]]:sgpr_32 = S_MOV_B32 83 + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_20:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 20, implicit $exec, implicit $mode, implicit-def $m0 ; GFX908-NEXT: [[S_MOV_B32_73:%[0-9]+]]:sgpr_32 = S_MOV_B32 84 + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_21:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 21, implicit $exec, implicit $mode, implicit-def $m0 + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_22:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 22, implicit $exec, implicit $mode, implicit-def $m0 + ; GFX908-NEXT: [[V_CVT_I32_F64_e32_23:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_I32_F64_e32 23, implicit $exec, implicit $mode ; GFX908-NEXT: {{ $}} ; GFX908-NEXT: bb.1: ; GFX908-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000) diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll index 11cda2d..c96ba75 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll @@ -199,7 +199,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_negabsf32(bfloat %src0, bfloat %src1, ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_f32imm1(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_f32imm1: ; GFX1250: ; %bb.0: @@ -230,7 +229,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_f32imminv2pi(bfloat %src0, bfloat %src ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi: ; GFX1250: ; %bb.0: @@ -247,7 +245,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63(bfloat %src0, bfloat %src1) #0 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63: ; GFX1250: ; %bb.0: @@ -360,7 +357,6 @@ define float @no_mix_simple_fabs(float %src0, float %src1, float %src2) #0 { ret float %result } - define float @v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals(bfloat %src0, bfloat %src1, bfloat %src2) #1 { ; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals: ; GFX1250: ; %bb.0: @@ -469,7 +465,6 @@ define float @v_mad_mix_f32_negprecvtbf16lo_bf16lo_bf16lo(i32 %src0.arg, bfloat ret float %result } - define float @v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo(i32 %src0.arg, bfloat %src1, bfloat %src2) #0 { ; GFX1250-LABEL: v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo: ; GFX1250: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll index 
1b2eb83..03304ae 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll @@ -74,9 +74,7 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_post_cvt(bfloat %src0, b ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp +; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0] clamp ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %src0.ext = fpext bfloat %src0 to float %src1.ext = fpext bfloat %src1 to float @@ -105,7 +103,6 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_pre_cvt(bfloat %src0, bf ret bfloat %cvt.result } - define <2 x bfloat> @v_mad_mix_v2f32(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32: ; GFX1250: ; %bb.0: @@ -178,7 +175,6 @@ define <4 x bfloat> @v_mad_mix_v4f32(<4 x bfloat> %src0, <4 x bfloat> %src1, <4 ret <4 x bfloat> %cvt.result } - define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32_clamp_postcvt: ; GFX1250: ; %bb.0: @@ -191,9 +187,7 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo ; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[4:5], v[6:7], v[0:1] -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %src0.ext = fpext <2 x bfloat> %src0 to <2 x float> %src1.ext = fpext <2 x bfloat> %src1 to <2 x float> @@ -205,7 +199,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo ret <2 x bfloat> %clamp } - define <3 x bfloat> @v_mad_mix_v3f32_clamp_postcvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v3f32_clamp_postcvt: ; GFX1250: ; %bb.0: @@ -247,11 +240,8 @@ define <4 x bfloat> @v_mad_mix_v4f32_clamp_postcvt(<4 x bfloat> %src0, <4 x bflo ; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[6:7], v[0:1], v[2:3] ; GFX1250-NEXT: v_pk_fma_f32 v[2:3], v[8:9], v[10:11], v[12:13] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp -; GFX1250-NEXT: v_pk_max_num_bf16 v1, v1, v1 clamp +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3 clamp ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %src0.ext = fpext <4 x bfloat> %src0 to <4 x float> %src1.ext = fpext <4 x bfloat> %src1 to <4 x float> @@ -323,7 +313,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt_hi(<2 x bfloat> %src0, <2 x b ret <2 x bfloat> %insert } - define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v2f32_clamp_precvt: ; GFX1250: ; %bb.0: @@ -351,7 +340,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloa ret <2 x bfloat> %cvt.result } - define <3 x bfloat> 
@v_mad_mix_v3f32_clamp_precvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 { ; GFX1250-LABEL: v_mad_mix_v3f32_clamp_precvt: ; GFX1250: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll new file mode 100644 index 0000000..6d0aa1e --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll @@ -0,0 +1,108 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s + +define protected amdgpu_kernel void @no_folding_imm_to_inst_with_fi(<4 x i64> %val4, <16 x i64> %val16) { +; CHECK-LABEL: no_folding_imm_to_inst_with_fi: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_clause 0x2 +; CHECK-NEXT: s_load_b256 s[36:43], s[4:5], 0x24 +; CHECK-NEXT: s_load_b512 s[16:31], s[4:5], 0xe4 +; CHECK-NEXT: s_load_b512 s[0:15], s[4:5], 0xa4 +; CHECK-NEXT: s_mov_b64 s[34:35], src_private_base +; CHECK-NEXT: s_movk_i32 s33, 0x70 +; CHECK-NEXT: s_movk_i32 s34, 0x60 +; CHECK-NEXT: s_or_b32 s44, 0x80, s33 +; CHECK-NEXT: s_mov_b32 s45, s35 +; CHECK-NEXT: s_or_b32 s46, 0x80, s34 +; CHECK-NEXT: s_mov_b32 s47, s35 +; CHECK-NEXT: v_dual_mov_b32 v20, s44 :: v_dual_mov_b32 v21, s45 +; CHECK-NEXT: v_dual_mov_b32 v22, s46 :: v_dual_mov_b32 v23, s47 +; CHECK-NEXT: s_movk_i32 s34, 0x80 +; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; CHECK-NEXT: v_dual_mov_b32 v34, s34 :: v_dual_mov_b32 v35, s35 +; CHECK-NEXT: s_wait_kmcnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v1, s41 +; CHECK-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43 +; CHECK-NEXT: v_dual_mov_b32 v4, s36 :: v_dual_mov_b32 v5, s37 +; CHECK-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v7, s39 +; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v1, s21 +; CHECK-NEXT: s_movk_i32 s20, 0x50 +; CHECK-NEXT: v_dual_mov_b32 v8, s28 :: v_dual_mov_b32 v9, s29 +; CHECK-NEXT: v_dual_mov_b32 v10, s30 :: v_dual_mov_b32 v11, s31 +; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: s_or_b32 s20, 0x80, s20 +; CHECK-NEXT: s_mov_b32 s21, s35 +; CHECK-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25 +; CHECK-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27 +; CHECK-NEXT: v_dual_mov_b32 v2, s22 :: v_dual_mov_b32 v3, s23 +; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: v_dual_mov_b32 v25, s21 :: v_dual_mov_b32 v24, s20 +; CHECK-NEXT: scratch_store_b128 off, v[4:7], off scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[22:23], v[12:15] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[24:25], v[0:3] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17 +; CHECK-NEXT: s_or_b32 s16, 0x80, 64 +; CHECK-NEXT: s_mov_b32 s17, s35 +; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13 +; CHECK-NEXT: s_or_b32 s12, 0x80, 48 +; CHECK-NEXT: s_mov_b32 s13, s35 +; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 +; CHECK-NEXT: s_or_b32 s8, 0x80, 32 +; CHECK-NEXT: s_mov_b32 s9, s35 +; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5 +; CHECK-NEXT: s_or_b32 s4, 0x80, 16 +; CHECK-NEXT: s_mov_b32 s5, s35 +; CHECK-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v3, s19 +; 
CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: v_dual_mov_b32 v27, s17 :: v_dual_mov_b32 v26, s16 +; CHECK-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s15 +; CHECK-NEXT: v_dual_mov_b32 v29, s13 :: v_dual_mov_b32 v28, s12 +; CHECK-NEXT: v_dual_mov_b32 v31, s9 :: v_dual_mov_b32 v30, s8 +; CHECK-NEXT: v_dual_mov_b32 v33, s5 :: v_dual_mov_b32 v32, s4 +; CHECK-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 +; CHECK-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s7 +; CHECK-NEXT: v_dual_mov_b32 v16, s0 :: v_dual_mov_b32 v17, s1 +; CHECK-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v19, s3 +; CHECK-NEXT: flat_store_b128 v[26:27], v[0:3] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[28:29], v[4:7] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[30:31], v[8:11] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[32:33], v[12:15] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[34:35], v[16:19] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[22:23] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[26:27] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[24:25] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[30:31] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[28:29] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[34:35] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[32:33] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt 0x0 +; CHECK-NEXT: s_endpgm +bb: + %alloca = alloca <4 x i64>, align 32, addrspace(5) + %alloca1 = alloca <16 x i64>, align 128, addrspace(5) + store volatile <4 x i64> %val4, ptr addrspace(5) %alloca + %ascast = addrspacecast ptr addrspace(5) %alloca1 to ptr + store volatile <16 x i64> %val16, ptr %ascast + %load = load volatile <16 x i64>, ptr %ascast + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll index 42401af..8304be9 100644 --- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll +++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll @@ -78,12 +78,14 @@ define amdgpu_kernel void @fadd_v2_vs(ptr addrspace(1) %a, <2 x float> %x) { ; GFX1250-LABEL: fadd_v2_vs: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -142,13 +144,16 @@ define amdgpu_kernel void @fadd_v4_vs(ptr addrspace(1) 
%a, <4 x float> %x) { ; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[2:3] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[4:5] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[6:7] +; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v4_vs: @@ -156,13 +161,16 @@ define amdgpu_kernel void @fadd_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[4:5] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[6:7] +; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id @@ -332,56 +340,69 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fadd_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v40, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1] -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96 -; 
GFX1250-SDAG-NEXT: s_clause 0x1 -; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4 -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v40, s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v40, s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v40, s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v40, s[34:35] +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v40, s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v40, s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v40, s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v40, s[34:35] offset:112 +; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 +; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[12:13] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[14:15] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s20 :: v_dual_mov_b32 v35, s21 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s22 :: v_dual_mov_b32 v39, s23 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v37, s29 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s30 :: v_dual_mov_b32 v43, s31 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v44, s24 :: v_dual_mov_b32 v33, s19 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s28 :: v_dual_mov_b32 v57, s15 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v54, s12 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s13 :: v_dual_mov_b32 v56, s14 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s7 :: v_dual_mov_b32 v52, s2 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s27 :: v_dual_mov_b32 v48, s4 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s5 :: v_dual_mov_b32 v50, s6 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s25 :: v_dual_mov_b32 v46, s26 +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[34:35] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s9 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s10 :: v_dual_mov_b32 v39, s11 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], s[10:11] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], s[16:17] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], s[40:41] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[26:27], v[26:27], v[42:43] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[24:25], v[24:25], v[36:37] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[16:17] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[18:19], v[18:19], s[38:39] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[24:25], v[24:25], s[48:49] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[34:35] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[18:19], v[18:19], v[38:39] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[28:29], v[28:29], s[44:45] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[30:31], v[30:31], s[46:47] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[26:27], v[26:27], s[50:51] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[16:17], v[16:17], s[36:37] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], s[42:43] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], s[18:19] 
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[8:9], v[8:9], s[20:21] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[10:11], v[10:11], s[22:23] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], s[8:9] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[54:55] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[56:57] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[52:53] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[42:43] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[48:49] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[50:51] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[44:45] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[46:47] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[32:33] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[36:37] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[16:19], s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[12:15], s[34:35] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[8:11], s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[4:7], s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[20:23], s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[24:27], s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[0:3], s[34:35] +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[28:31], s[34:35] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v32_vs: @@ -389,54 +410,70 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] 
offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[16:17] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], s[20:21] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], s[22:23] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], s[24:25] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], s[26:27] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[40:41] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[42:43] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[12:13], v[12:13], s[28:29] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[14:15], v[14:15], s[30:31] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[44:45] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[46:47] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[16:17], v[16:17], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[18:19], v[18:19], s[2:3] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[48:49] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[18:19], v[18:19], v[50:51] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[20:21], v[20:21], s[4:5] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[22:23], v[22:23], s[6:7] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[52:53] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[54:55] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[24:25], v[24:25], s[8:9] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[26:27], v[26:27], s[10:11] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[24:25], v[24:25], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[26:27], v[26:27], v[34:35] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], s[12:13] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], s[14:15] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], 
v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -502,15 +539,16 @@ define amdgpu_kernel void @fadd_v2_v_imm(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fadd_v2_v_imm: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -645,15 +683,16 @@ define amdgpu_kernel void @fadd_v2_v_lit_splat(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fadd_v2_v_lit_splat: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1.0 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] 
scale_offset +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -703,13 +742,15 @@ define amdgpu_kernel void @fadd_v2_v_lit_hi0(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fadd_v2_v_lit_hi0: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x3f800000 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -746,17 +787,31 @@ define amdgpu_kernel void @fadd_v2_v_lit_lo0(ptr addrspace(1) %a) { ; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; PACKED-NEXT: s_endpgm ; -; GFX1250-LABEL: fadd_v2_v_lit_lo0: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 -; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000) -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset -; GFX1250-NEXT: s_endpgm +; GFX1250-SDAG-LABEL: fadd_v2_v_lit_lo0: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x3f80000000000000) +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: fadd_v2_v_lit_lo0: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id %load = load <2 x float>, ptr addrspace(1) %gep, align 8 @@ -792,17 +847,31 @@ define amdgpu_kernel void @fadd_v2_v_unfoldable_lit(ptr 
addrspace(1) %a) { ; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; PACKED-NEXT: s_endpgm ; -; GFX1250-LABEL: fadd_v2_v_unfoldable_lit: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 -; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000) -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset -; GFX1250-NEXT: s_endpgm +; GFX1250-SDAG-LABEL: fadd_v2_v_unfoldable_lit: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x400000003f800000) +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: fadd_v2_v_unfoldable_lit: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id %load = load <2 x float>, ptr addrspace(1) %gep, align 8 @@ -1085,12 +1154,14 @@ define amdgpu_kernel void @fadd_v2_v_fneg_lo2(ptr addrspace(1) %a, float %x, flo ; GFX1250-SDAG-LABEL: fadd_v2_v_fneg_lo2: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] neg_lo:[0,1] -; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] neg_lo:[0,1] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v2_v_fneg_lo2: @@ -1159,12 +1230,14 @@ define amdgpu_kernel void @fadd_v2_v_fneg_hi2(ptr addrspace(1) %a, float %x, flo ; GFX1250-SDAG-LABEL: fadd_v2_v_fneg_hi2: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: 
v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] op_sel:[0,1] op_sel_hi:[1,0] neg_hi:[0,1] -; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] op_sel:[0,1] op_sel_hi:[1,0] neg_hi:[0,1] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v2_v_fneg_hi2: @@ -1262,12 +1335,14 @@ define amdgpu_kernel void @fmul_v2_vs(ptr addrspace(1) %a, <2 x float> %x) { ; GFX1250-LABEL: fmul_v2_vs: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -1326,13 +1401,16 @@ define amdgpu_kernel void @fmul_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[2:3] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[4:5] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[6:7] +; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fmul_v4_vs: @@ -1340,13 +1418,16 @@ define amdgpu_kernel void @fmul_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: 
global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[4:5] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[6:7] +; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id @@ -1516,56 +1597,69 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fmul_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v40, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1] -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96 -; GFX1250-SDAG-NEXT: s_clause 0x1 -; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4 -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v40, s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v40, s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v40, s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v40, s[34:35] +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v40, s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v40, s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v40, s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v40, s[34:35] offset:112 +; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 +; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[12:13] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[14:15] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s20 :: v_dual_mov_b32 v35, s21 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s22 :: v_dual_mov_b32 v39, s23 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v37, s29 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s30 :: v_dual_mov_b32 v43, s31 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v44, s24 :: v_dual_mov_b32 v33, s19 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s28 :: v_dual_mov_b32 v57, s15 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v54, s12 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s13 :: v_dual_mov_b32 v56, s14 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s7 :: v_dual_mov_b32 v52, s2 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s27 :: v_dual_mov_b32 v48, s4 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s5 :: v_dual_mov_b32 v50, s6 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s25 :: v_dual_mov_b32 v46, s26 +; GFX1250-SDAG-NEXT: 
s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[34:35] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s9 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s10 :: v_dual_mov_b32 v39, s11 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], s[10:11] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], s[16:17] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], s[40:41] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[42:43] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[24:25], v[24:25], v[36:37] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[16:17] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[18:19], v[18:19], s[38:39] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[24:25], v[24:25], s[48:49] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[34:35] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[38:39] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[28:29], v[28:29], s[44:45] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[30:31], v[30:31], s[46:47] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[26:27], v[26:27], s[50:51] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[16:17], v[16:17], s[36:37] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], s[42:43] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], s[18:19] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[8:9], v[8:9], s[20:21] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[10:11], v[10:11], s[22:23] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], s[8:9] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[54:55] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[56:57] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[52:53] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[42:43] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[48:49] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[50:51] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[44:45] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[46:47] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[32:33] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[36:37] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[16:19], s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[12:15], s[34:35] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[8:11], s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[4:7], s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[20:23], s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[24:27], s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[0:3], s[34:35] +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[28:31], 
s[34:35] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fmul_v32_vs: @@ -1573,54 +1667,70 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[16:17] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], s[20:21] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], s[22:23] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], s[24:25] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], s[26:27] +; 
GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[40:41] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[42:43] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[12:13], v[12:13], s[28:29] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[14:15], v[14:15], s[30:31] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[44:45] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[46:47] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[16:17], v[16:17], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[18:19], v[18:19], s[2:3] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[48:49] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[50:51] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[20:21], v[20:21], s[4:5] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[22:23], v[22:23], s[6:7] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[52:53] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[54:55] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[24:25], v[24:25], s[8:9] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[26:27], v[26:27], s[10:11] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[24:25], v[24:25], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[34:35] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], s[12:13] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], s[14:15] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -1685,15 +1795,16 @@ define amdgpu_kernel void @fmul_v2_v_imm(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fmul_v2_v_imm: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 +; GFX1250-GISEL-NEXT: 
v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -1828,15 +1939,16 @@ define amdgpu_kernel void @fmul_v2_v_lit_splat(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fmul_v2_v_lit_splat: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 4.0 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -1873,17 +1985,31 @@ define amdgpu_kernel void @fmul_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; PACKED-NEXT: s_endpgm ; -; GFX1250-LABEL: fmul_v2_v_unfoldable_lit: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 -; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000) -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset -; GFX1250-NEXT: s_endpgm +; GFX1250-SDAG-LABEL: fmul_v2_v_unfoldable_lit: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000) +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: fmul_v2_v_unfoldable_lit: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; 
GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id %load = load <2 x float>, ptr addrspace(1) %gep, align 8 @@ -2040,12 +2166,14 @@ define amdgpu_kernel void @fma_v2_vs(ptr addrspace(1) %a, <2 x float> %x) { ; GFX1250-LABEL: fma_v2_vs: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[2:3] +; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -2104,13 +2232,16 @@ define amdgpu_kernel void @fma_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[2:3], s[2:3] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[0:1], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[4:5], v[4:5] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[6:7], v[6:7] +; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fma_v4_vs: @@ -2118,13 +2249,16 @@ define amdgpu_kernel void @fma_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[0:1], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[2:3], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[4:5], v[4:5] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[6:7], v[6:7] +; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id @@ -2294,56 +2428,68 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fma_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v34, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1] -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96 -; GFX1250-SDAG-NEXT: s_clause 0x1 -; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4 -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v34, s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v34, s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v34, s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v34, s[34:35] +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v34, s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v34, s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v34, s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v34, s[34:35] offset:112 +; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 +; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[12:13], s[12:13] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[14:15], s[14:15] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[22:23] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[30:31] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[28:29] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[12:13] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[56:57], s[14:15] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[2:3] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[4:5] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[6:7] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[24:25] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[26:27] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[18:19] +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[8:9] +; 
GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[10:11] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], s[10:11], s[10:11] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], s[16:17], s[16:17] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], s[40:41], s[40:41] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[18:19], v[18:19], s[38:39], s[38:39] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[24:25], v[24:25], s[48:49], s[48:49] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[26:27], v[26:27], v[42:43], v[42:43] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[24:25], v[24:25], v[40:41], v[40:41] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[16:17] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], s[44:45], s[44:45] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[30:31], v[30:31], s[46:47], s[46:47] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[26:27], v[26:27], s[50:51], s[50:51] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[16:17], v[16:17], s[36:37], s[36:37] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], s[42:43], s[42:43] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[22:23], v[22:23], s[18:19], s[18:19] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[8:9], v[8:9], s[20:21], s[20:21] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[10:11], v[10:11], s[22:23], s[22:23] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], s[8:9], s[8:9] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[54:55], v[54:55] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[16:17], v[16:17], v[36:37], v[36:37] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[18:19], v[18:19], v[38:39], v[38:39] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[56:57], v[56:57] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[52:53], v[52:53] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[42:43], v[42:43] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[48:49], v[48:49] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[50:51], v[50:51] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[44:45], v[44:45] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[22:23], v[22:23], v[46:47], v[46:47] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[32:33], v[32:33] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[40:41], v[40:41] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[16:19], s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[12:15], s[34:35] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[8:11], s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[4:7], s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[20:23], s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[24:27], s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[0:3], s[34:35] +; 
GFX1250-SDAG-NEXT: global_store_b128 v34, v[28:31], s[34:35] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fma_v32_vs: @@ -2351,54 +2497,70 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[16:17], s[16:17] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[18:19], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], s[20:21], s[20:21] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], s[22:23], s[22:23] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[36:37], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[38:39], v[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 -; 
GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], s[24:25], s[24:25] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], s[26:27], s[26:27] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[40:41], v[40:41] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[42:43], v[42:43] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[12:13], v[12:13], s[28:29], s[28:29] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[14:15], v[14:15], s[30:31], s[30:31] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[44:45], v[44:45] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[46:47], v[46:47] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[16:17], v[16:17], s[0:1], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[18:19], v[18:19], s[2:3], s[2:3] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[16:17], v[16:17], v[48:49], v[48:49] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[18:19], v[18:19], v[50:51], v[50:51] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[20:21], v[20:21], s[4:5], s[4:5] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[22:23], v[22:23], s[6:7], s[6:7] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[52:53], v[52:53] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[22:23], v[22:23], v[54:55], v[54:55] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[24:25], v[24:25], s[8:9], s[8:9] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[26:27], v[26:27], s[10:11], s[10:11] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[24:25], v[24:25], v[32:33], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[26:27], v[26:27], v[34:35], v[34:35] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], s[12:13], s[12:13] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], s[14:15], s[14:15] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -2488,17 +2650,19 @@ define amdgpu_kernel void @fma_v2_v_imm(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fma_v2_v_imm: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; 
GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 0x43480000 ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 ; GFX1250-GISEL-NEXT: s_mov_b32 s5, s4 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -2653,17 +2817,19 @@ define amdgpu_kernel void @fma_v2_v_lit_splat(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fma_v2_v_lit_splat: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 4.0 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1.0 ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 ; GFX1250-GISEL-NEXT: s_mov_b32 s5, s4 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -2740,29 +2906,30 @@ define amdgpu_kernel void @fma_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; GFX1250-SDAG-LABEL: fma_v2_v_unfoldable_lit: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000) -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[4:5], lit64(0x4040000040800000) +; GFX1250-SDAG-NEXT: v_and_b32_e32 v6, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000) +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], lit64(0x400000003f800000) ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[4:5], s[2:3] -; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX1250-SDAG-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fma_v2_v_unfoldable_lit: ; 
GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000) ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_mov_b64 s[4:5], lit64(0x400000003f800000) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -3268,20 +3435,22 @@ define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) { ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3) ; GFX1250-SDAG-NEXT: s_add_f32 s1, s1, 0 ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GFX1250-SDAG-NEXT: flat_store_b64 v[0:1], v[0:1] +; GFX1250-SDAG-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_fadd_fsub_0: ; GFX1250-GISEL: ; %bb.0: ; %bb ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], 0 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v1 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v0, v1 ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v3, v0 -; GFX1250-GISEL-NEXT: flat_store_b64 v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: flat_store_b64 v[0:1], v[2:3] scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm bb: %i12 = fadd <2 x float> zeroinitializer, %arg @@ -3363,15 +3532,16 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 ; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, 0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: s_add_f32 s6, s1, s3 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], s[2:3], s[6:7] op_sel_hi:[1,0] -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, v0 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[2:3], s[2:3] neg_lo:[0,1] neg_hi:[0,1] -; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[4:5] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[2:3] +; GFX1250-SDAG-NEXT: s_add_f32 s2, s1, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_3) +; 
GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[0:1], s[2:3] op_sel_hi:[1,0] +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, v2 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[4:5], v[0:1] neg_lo:[0,1] neg_hi:[0,1] +; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[4:5] ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_fadd_fsub: @@ -3380,13 +3550,16 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_sub_f32 s0, s0, s2 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_3) -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v0, v1 :: v_dual_mov_b32 v2, s0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[2:3], v[0:1] -; GFX1250-GISEL-NEXT: v_dual_subrev_f32 v3, s3, v0 :: v_dual_mov_b32 v0, 0 +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v1 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[2:3], v[0:1] +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_subrev_f32 v3, s3, v0 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[4:5] ; GFX1250-GISEL-NEXT: s_endpgm bb: @@ -3593,7 +3766,9 @@ define amdgpu_kernel void @fneg_v2f32_scalar(ptr addrspace(1) %a, <2 x float> %x ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], 1.0, s[2:3] op_sel_hi:[0,1] neg_lo:[0,1] neg_hi:[0,1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], 1.0, v[0:1] op_sel_hi:[0,1] neg_lo:[0,1] neg_hi:[0,1] ; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-GISEL-NEXT: s_endpgm %fneg = fsub <2 x float> <float -0.0, float -0.0>, %x diff --git a/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll b/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll index 013b68a..99e5d00 100644 --- a/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll +++ b/llvm/test/CodeGen/AMDGPU/ps-shader-arg-count.ll @@ -1,5 +1,7 @@ -;RUN: llc < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK -;RUN: llc < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK +;RUN: llc -global-isel=1 < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK +;RUN: llc -global-isel=1 < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK +;RUN: llc -global-isel=0 < %s -mtriple=amdgcn-pal -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK +;RUN: llc -global-isel=0 < %s -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 | FileCheck %s --check-prefixes=CHECK ; ;CHECK-LABEL: {{^}}_amdgpu_ps_1_arg: ; ;CHECK: NumVgprs: 4 diff --git a/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll b/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll index 131c5f3..f67cbe3 100644 --- 
a/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll +++ b/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll @@ -10,6 +10,8 @@ ; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GETREG,GETREG-GISEL -check-prefix=GCN %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GCN,GFX12 %s ; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GCN,GFX12 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250 %s declare i64 @llvm.readcyclecounter() #0 @@ -21,6 +23,7 @@ declare i64 @llvm.readcyclecounter() #0 ; GFX12: s_getreg_b32 [[HI2:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI) ; GFX12: s_cmp_eq_u32 [[HI1]], [[HI2]] ; GFX12: s_cselect_b32 {{s[0-9]+}}, [[LO1]], 0 +; GFX1250: s_get_shader_cycles_u64 s{{\[[0-9]+:[0-9]+\]}} ; GCN-DAG: kmcnt ; MEMTIME: store_dwordx2 ; SIVI-NOT: kmcnt @@ -53,6 +56,7 @@ define amdgpu_kernel void @test_readcyclecounter(ptr addrspace(1) %out) #0 { ; GFX12: s_getreg_b32 [[HI1:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI) ; GFX12: s_getreg_b32 [[LO1:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_LO) ; GFX12: s_getreg_b32 [[HI2:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI) +; GFX1250: s_get_shader_cycles_u64 s{{\[[0-9]+:[0-9]+\]}} ; GCN-DAG: s_load_{{dword|b32|b64}} ; GETREG-DAG: s_getreg_b32 s{{[0-9]+}}, hwreg(HW_REG_SHADER_CYCLES, 0, 20) ; GFX12: s_cmp_eq_u32 [[HI1]], [[HI2]] diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll index 0c6339e..b35a74e 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mcpu=gfx90a < %s | FileCheck %s +; RUN: llc -mcpu=gfx942 -amdgpu-mfma-vgpr-form < %s | FileCheck %s target triple = "amdgcn-amd-amdhsa" @@ -7,7 +7,10 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp ; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma: ; CHECK: ; %bb.0: ; %bb ; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 +; CHECK-NEXT: v_mov_b32_e32 v32, 1.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 2.0 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: global_load_dwordx4 v[28:31], v0, s[0:1] offset:112 ; CHECK-NEXT: global_load_dwordx4 v[24:27], v0, s[0:1] offset:96 @@ -18,7 +21,229 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp ; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[0:1] offset:16 ; CHECK-NEXT: s_nop 0 ; CHECK-NEXT: global_load_dwordx4 v[0:3], v0, s[0:1] +; CHECK-NEXT: v_accvgpr_write_b32 a0, 1.0 +; CHECK-NEXT: v_accvgpr_write_b32 a1, 2.0 ; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[32:63], a0, a1, v[0:31] +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_mov_b32_e32 v2, v32 +; CHECK-NEXT: v_mov_b32_e32 v3, v33 +; CHECK-NEXT: v_mov_b32_e32 v4, v34 +; CHECK-NEXT: v_mov_b32_e32 v5, v35 +; CHECK-NEXT: v_mov_b32_e32 v6, v36 +; CHECK-NEXT: v_mov_b32_e32 v7, v37 +; CHECK-NEXT: v_mov_b32_e32 v8, v38 +; CHECK-NEXT: v_mov_b32_e32 v9, v39 +; CHECK-NEXT: v_mov_b32_e32 
v10, v40 +; CHECK-NEXT: v_mov_b32_e32 v11, v41 +; CHECK-NEXT: v_mov_b32_e32 v12, v42 +; CHECK-NEXT: v_mov_b32_e32 v13, v43 +; CHECK-NEXT: v_mov_b32_e32 v14, v44 +; CHECK-NEXT: v_mov_b32_e32 v15, v45 +; CHECK-NEXT: v_mov_b32_e32 v16, v46 +; CHECK-NEXT: v_mov_b32_e32 v17, v47 +; CHECK-NEXT: v_mov_b32_e32 v18, v48 +; CHECK-NEXT: v_mov_b32_e32 v19, v49 +; CHECK-NEXT: v_mov_b32_e32 v20, v50 +; CHECK-NEXT: v_mov_b32_e32 v21, v51 +; CHECK-NEXT: v_mov_b32_e32 v22, v52 +; CHECK-NEXT: v_mov_b32_e32 v23, v53 +; CHECK-NEXT: v_mov_b32_e32 v24, v54 +; CHECK-NEXT: v_mov_b32_e32 v25, v55 +; CHECK-NEXT: v_mov_b32_e32 v26, v56 +; CHECK-NEXT: v_mov_b32_e32 v27, v57 +; CHECK-NEXT: v_mov_b32_e32 v28, v58 +; CHECK-NEXT: v_mov_b32_e32 v29, v59 +; CHECK-NEXT: v_mov_b32_e32 v30, v60 +; CHECK-NEXT: v_mov_b32_e32 v31, v61 +; CHECK-NEXT: v_mov_b32_e32 v32, 0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], a0, a1, v[0:31] +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 +; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] +; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %in.1, i32 0, i32 0, i32 0) + %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0) + %tmp.1 = shufflevector <32 x float> %mai.2, <32 x float> %mai.1, <32 x i32> <i32 32, i32 33, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29> + %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %tmp.1, i32 0, i32 0, i32 0) + store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128 + ret void +} + +define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle(ptr addrspace(1) %arg) #0 { +; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 +; CHECK-NEXT: v_mov_b32_e32 v32, 1.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 2.0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[28:31], v0, s[0:1] offset:112 +; CHECK-NEXT: global_load_dwordx4 v[24:27], v0, s[0:1] offset:96 +; CHECK-NEXT: global_load_dwordx4 v[20:23], v0, s[0:1] offset:80 +; CHECK-NEXT: global_load_dwordx4 v[16:19], v0, s[0:1] offset:64 +; CHECK-NEXT: global_load_dwordx4 v[12:15], v0, s[0:1] offset:48 +; CHECK-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] offset:32 +; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[0:1] offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[0:3], v0, s[0:1] +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: 
v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mov_b32_e32 v32, 0 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 +; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] +; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %in.1, i32 0, i32 0, i32 0) + %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0) + %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0) + store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128 + ret void +} + +define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm0_src2(ptr addrspace(1) %arg) #0 { +; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm0_src2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: v_mov_b32_e32 v32, 1.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 2.0 +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mov_b32_e32 v32, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 +; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 +; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> zeroinitializer, i32 0, i32 0, i32 0) + %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0) + %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0) + store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128 + ret void +} + +define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm1_src2(ptr addrspace(1) %arg) #0 { +; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm1_src2: +; CHECK: ; %bb.0: ; 
%bb +; CHECK-NEXT: v_mov_b32_e32 v32, 1.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 2.0 +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, 1.0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mov_b32_e32 v32, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 +; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 +; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> splat (float 1.0), i32 0, i32 0, i32 0) + %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0) + %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0) + store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128 + ret void +} + +; The inline asm requires the value be copied to an AGPR class, not +; the AV_* pseudo we usually expect for register allocator live range +; splits. 
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_to_agpr_class(ptr addrspace(1) %arg) #0 { +; CHECK-LABEL: test_rewrite_mfma_direct_copy_to_agpr_class: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 +; CHECK-NEXT: v_mov_b32_e32 v32, 2.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 4.0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112 +; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96 +; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80 +; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1] +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 a[0:31], v32, v33, a[0:31] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %in, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +; TODO: Handle rewriting this case +define void @test_rewrite_mfma_imm_src2(float %arg0, float %arg1) #0 { +; CHECK-LABEL: test_rewrite_mfma_imm_src2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v0, v1, 2.0 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 ; CHECK-NEXT: v_accvgpr_write_b32 a0, v0 ; CHECK-NEXT: v_accvgpr_write_b32 a1, v1 ; CHECK-NEXT: v_accvgpr_write_b32 a2, v2 @@ -51,145 +276,124 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp ; CHECK-NEXT: v_accvgpr_write_b32 a29, v29 ; CHECK-NEXT: v_accvgpr_write_b32 a30, v30 ; CHECK-NEXT: v_accvgpr_write_b32 a31, v31 -; CHECK-NEXT: v_mov_b32_e32 v0, 1.0 -; CHECK-NEXT: v_mov_b32_e32 v1, 2.0 -; CHECK-NEXT: s_nop 1 -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v0, v1, a[0:31] -; CHECK-NEXT: s_nop 7 -; CHECK-NEXT: s_nop 7 -; CHECK-NEXT: s_nop 2 -; CHECK-NEXT: v_accvgpr_read_b32 v4, a59 -; CHECK-NEXT: v_accvgpr_read_b32 v5, a58 -; CHECK-NEXT: v_accvgpr_read_b32 v6, a57 -; CHECK-NEXT: v_accvgpr_read_b32 v7, a56 -; CHECK-NEXT: v_accvgpr_read_b32 v8, a55 -; CHECK-NEXT: v_accvgpr_read_b32 v9, a54 -; CHECK-NEXT: v_accvgpr_read_b32 v10, a53 -; CHECK-NEXT: v_accvgpr_read_b32 v11, a52 -; CHECK-NEXT: v_accvgpr_read_b32 v12, a51 -; CHECK-NEXT: v_accvgpr_read_b32 v13, a50 -; CHECK-NEXT: v_accvgpr_read_b32 v14, a49 -; CHECK-NEXT: v_accvgpr_read_b32 v15, a48 -; CHECK-NEXT: v_accvgpr_read_b32 v16, a47 -; CHECK-NEXT: v_accvgpr_read_b32 v17, a46 -; CHECK-NEXT: v_accvgpr_read_b32 v18, a45 -; CHECK-NEXT: v_accvgpr_read_b32 v19, a44 -; CHECK-NEXT: v_accvgpr_read_b32 v20, a43 -; CHECK-NEXT: v_accvgpr_read_b32 v21, a42 -; CHECK-NEXT: v_accvgpr_read_b32 v22, a41 -; CHECK-NEXT: v_accvgpr_read_b32 v23, a40 -; CHECK-NEXT: v_accvgpr_read_b32 v24, a39 -; CHECK-NEXT: v_accvgpr_read_b32 v25, a38 -; CHECK-NEXT: v_accvgpr_read_b32 v26, a37 -; CHECK-NEXT: 
v_accvgpr_read_b32 v27, a36 -; CHECK-NEXT: v_accvgpr_read_b32 v28, a35 -; CHECK-NEXT: v_accvgpr_read_b32 v29, a34 -; CHECK-NEXT: v_accvgpr_mov_b32 a2, a32 -; CHECK-NEXT: v_accvgpr_mov_b32 a3, a33 -; CHECK-NEXT: v_accvgpr_write_b32 a4, v29 -; CHECK-NEXT: v_accvgpr_write_b32 a5, v28 -; CHECK-NEXT: v_accvgpr_write_b32 a6, v27 -; CHECK-NEXT: v_accvgpr_write_b32 a7, v26 -; CHECK-NEXT: v_accvgpr_write_b32 a8, v25 -; CHECK-NEXT: v_accvgpr_write_b32 a9, v24 -; CHECK-NEXT: v_accvgpr_write_b32 a10, v23 -; CHECK-NEXT: v_accvgpr_write_b32 a11, v22 -; CHECK-NEXT: v_accvgpr_write_b32 a12, v21 -; CHECK-NEXT: v_accvgpr_write_b32 a13, v20 -; CHECK-NEXT: v_accvgpr_write_b32 a14, v19 -; CHECK-NEXT: v_accvgpr_write_b32 a15, v18 -; CHECK-NEXT: v_accvgpr_write_b32 a16, v17 -; CHECK-NEXT: v_accvgpr_write_b32 a17, v16 -; CHECK-NEXT: v_accvgpr_write_b32 a18, v15 -; CHECK-NEXT: v_accvgpr_write_b32 a19, v14 -; CHECK-NEXT: v_accvgpr_write_b32 a20, v13 -; CHECK-NEXT: v_accvgpr_write_b32 a21, v12 -; CHECK-NEXT: v_accvgpr_write_b32 a22, v11 -; CHECK-NEXT: v_accvgpr_write_b32 a23, v10 -; CHECK-NEXT: v_accvgpr_write_b32 a24, v9 -; CHECK-NEXT: v_accvgpr_write_b32 a25, v8 -; CHECK-NEXT: v_accvgpr_write_b32 a26, v7 -; CHECK-NEXT: v_accvgpr_write_b32 a27, v6 -; CHECK-NEXT: v_accvgpr_write_b32 a28, v5 -; CHECK-NEXT: v_accvgpr_write_b32 a29, v4 -; CHECK-NEXT: v_accvgpr_mov_b32 a30, a60 -; CHECK-NEXT: v_accvgpr_mov_b32 a31, a61 -; CHECK-NEXT: s_nop 1 -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mov_b32_e32 v0, 0 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> splat (float 2.0), i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +; TODO: Handle rewriting this case +define void @test_rewrite_mfma_subreg_extract0(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract0: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] ; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: s_nop 1 -; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[0:1] offset:96 -; CHECK-NEXT: global_store_dwordx4 v0, a[28:31], s[0:1] offset:112 -; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[0:1] offset:64 -; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[0:1] offset:80 -; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32 -; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48 -; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1] -; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16 -; CHECK-NEXT: s_endpgm +; CHECK-NEXT: v_accvgpr_write_b32 a0, v2 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v3 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v4 +; CHECK-NEXT: 
v_accvgpr_write_b32 a3, v5 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] bb: - %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id - %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128 - %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %in.1, i32 0, i32 0, i32 0) - %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0) - %tmp.1 = shufflevector <32 x float> %mai.2, <32 x float> %mai.1, <32 x i32> <i32 32, i32 33, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29> - %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %tmp.1, i32 0, i32 0, i32 0) - store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128 + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) ret void } -define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle(ptr addrspace(1) %arg) #0 { -; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle: +define void @test_rewrite_mfma_subreg_extract1(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract1: ; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 -; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 -; CHECK-NEXT: v_mov_b32_e32 v1, 2.0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112 -; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96 -; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80 -; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64 -; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48 -; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32 -; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16 -; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1] -; CHECK-NEXT: v_mov_b32_e32 v0, 1.0 +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off ; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 1 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v6 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v7 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v8 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v9 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; 
CHECK-NEXT: s_setpc_b64 s[30:31] +bb: + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) + ret void +} + +; odd offset +define void @test_rewrite_mfma_subreg_extract2(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_subreg_extract2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16 ; CHECK-NEXT: s_nop 0 -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mov_b32_e32 v0, 0 +; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33] ; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: s_nop 1 -; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[0:1] offset:96 -; CHECK-NEXT: global_store_dwordx4 v0, a[28:31], s[0:1] offset:112 -; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[0:1] offset:64 -; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[0:1] offset:80 -; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32 -; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48 -; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1] -; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16 -; CHECK-NEXT: s_endpgm +; CHECK-NEXT: v_accvgpr_write_b32 a0, v3 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v4 +; CHECK-NEXT: v_accvgpr_write_b32 a2, v5 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v6 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] bb: - %id = call i32 @llvm.amdgcn.workitem.id.x() - %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id - %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128 - %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %in.1, i32 0, i32 0, i32 0) - %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0) - %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0) - store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128 + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4> + call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4) ret void } -declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #1 -declare noundef i32 
@llvm.amdgcn.workitem.id.x() #2 +declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half>, <4 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg) #2 +declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #2 +declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #3 -attributes #0 = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,4" } +attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" "amdgpu-waves-per-eu"="4,4" } attributes #1 = { convergent nocallback nofree nosync nounwind willreturn memory(none) } attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/llvm/test/CodeGen/AMDGPU/saddsat.ll b/llvm/test/CodeGen/AMDGPU/saddsat.ll index 019eb2c..4995ce6 100644 --- a/llvm/test/CodeGen/AMDGPU/saddsat.ll +++ b/llvm/test/CodeGen/AMDGPU/saddsat.ll @@ -124,9 +124,8 @@ define i32 @v_saddsat_i32(i32 %lhs, i32 %rhs) { ; GFX6-NEXT: v_add_i32_e64 v1, s[4:5], v0, v1 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0 ; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v1 -; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5] ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_saddsat_i32: @@ -136,9 +135,8 @@ define i32 @v_saddsat_i32(i32 %lhs, i32 %rhs) { ; GFX8-NEXT: v_add_u32_e64 v1, s[4:5], v0, v1 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0 ; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v1 -; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5] ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_saddsat_i32: @@ -383,16 +381,14 @@ define <2 x i32> @v_saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { ; GFX6-NEXT: v_add_i32_e64 v2, s[4:5], v0, v2 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0 ; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v2 -; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5] ; GFX6-NEXT: v_add_i32_e64 v2, s[4:5], v1, v3 ; GFX6-NEXT: v_cmp_gt_i32_e32 vcc, 0, v3 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1 ; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v2 -; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5] ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_saddsat_v2i32: @@ -402,16 +398,14 @@ define <2 x i32> @v_saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { ; GFX8-NEXT: v_add_u32_e64 v2, s[4:5], v0, v2 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0 ; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v2 -; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX8-NEXT: 
s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5] ; GFX8-NEXT: v_add_u32_e64 v2, s[4:5], v1, v3 ; GFX8-NEXT: v_cmp_gt_i32_e32 vcc, 0, v3 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1 ; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v2 -; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5] ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_saddsat_v2i32: @@ -442,8 +436,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) { ; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc ; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_saddsat_i64: @@ -456,8 +449,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) { ; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc ; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_saddsat_i64: @@ -470,8 +462,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) { ; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc ; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc ; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_saddsat_i64: @@ -480,12 +471,11 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) { ; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2 ; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v1, v3, vcc_lo ; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[2:3] -; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1] -; GFX10-NEXT: v_xor_b32_e32 v1, 0x80000000, v6 +; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: v_saddsat_i64: @@ -494,11 +484,11 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) { ; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2 ; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, v1, v3, vcc_lo ; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[2:3] -; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1] -; GFX11-NEXT: v_xor_b32_e32 v1, 0x80000000, v6 +; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo -; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1 +; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo ; GFX11-NEXT: s_setpc_b64 s[30:31] %result = call i64 @llvm.sadd.sat.i64(i64 %lhs, i64 %rhs) ret i64 %result diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll b/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll new file mode 100644 index 0000000..3c7b5bf --- /dev/null +++ 
b/llvm/test/CodeGen/AMDGPU/sgpr-count-graphics.ll @@ -0,0 +1,38 @@ +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck %s --check-prefixes=CHECK,PACKED16 +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck %s --check-prefixes=CHECK,SPLIT16 + +@global = addrspace(1) global i32 poison, align 4 + +; The hardware initializes the registers received as arguments by entry points, +; so they will be counted even if unused. + +; Vectors of i1 are always unpacked + +; CHECK-LABEL: vec_of_i1: +; CHECK: TotalNumSgprs: 8 +define amdgpu_ps void @vec_of_i1(<8 x i1> inreg %v8i1) { + ret void +} + +; Vectors of i8 are always unpacked + +; CHECK-LABEL: vec_of_i8: +; CHECK: TotalNumSgprs: 4 +define amdgpu_ps void @vec_of_i8(<4 x i8> inreg %v4i8) { + ret void +} + +; Vectors of 16-bit types are packed for newer architectures and unpacked for older ones. + +; CHECK-LABEL: vec_of_16_bit_ty: +; PACKED16: TotalNumSgprs: 3 +; SPLIT16: TotalNumSgprs: 6 +define amdgpu_ps void @vec_of_16_bit_ty(<2 x i16> inreg %v2i16, <4 x half> inreg %v4half) { + ret void +} + +; CHECK-LABEL: buffer_fat_ptr: +; CHECK: TotalNumSgprs: 5 +define amdgpu_ps void @buffer_fat_ptr(ptr addrspace(7) inreg %p) { + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/ssubsat.ll index 40d80f5..09c0e77 100644 --- a/llvm/test/CodeGen/AMDGPU/ssubsat.ll +++ b/llvm/test/CodeGen/AMDGPU/ssubsat.ll @@ -124,9 +124,8 @@ define i32 @v_ssubsat_i32(i32 %lhs, i32 %rhs) { ; GFX6-NEXT: v_sub_i32_e64 v1, s[4:5], v0, v1 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0 ; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v1 -; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5] ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_ssubsat_i32: @@ -136,9 +135,8 @@ define i32 @v_ssubsat_i32(i32 %lhs, i32 %rhs) { ; GFX8-NEXT: v_sub_u32_e64 v1, s[4:5], v0, v1 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0 ; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v1 -; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5] ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_ssubsat_i32: @@ -383,16 +381,14 @@ define <2 x i32> @v_ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { ; GFX6-NEXT: v_sub_i32_e64 v2, s[4:5], v0, v2 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0 ; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v2 -; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v2, s[4:5], v1, v3 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v3 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1 ; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v2 -; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5] ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_ssubsat_v2i32: @@ -402,16 +398,14 @@ define <2 x i32> @v_ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { ; GFX8-NEXT: v_sub_u32_e64 v2, s[4:5], v0, v2 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, 
v0 ; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v2 -; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v2, s[4:5], v1, v3 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v3 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1 ; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v2 -; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5] ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_ssubsat_v2i32: @@ -439,23 +433,20 @@ define <3 x i32> @v_ssubsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) { ; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v0, v3 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v0 ; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v3 -; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v0, v3, -v0, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v1, v4 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v4 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v1 ; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v3 -; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, -v1, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v2, v5 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v2 ; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v3 -; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v2, v3, -v2, s[4:5] ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_ssubsat_v3i32: @@ -465,23 +456,20 @@ define <3 x i32> @v_ssubsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) { ; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v0, v3 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v0 ; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v3 -; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, -v0, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v1, v4 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v4 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v1 ; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v3 -; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, -v1, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v2, v5 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v2 ; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v3 -; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v3, -v2, s[4:5] ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_ssubsat_v3i32: @@ -511,30 +499,26 @@ define <4 x i32> @v_ssubsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) { ; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v0, v4 
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v0 ; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v4 -; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v0, v4, -v0, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v1, v5 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v1 ; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v4 -; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v1, v4, -v1, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v2, v6 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v6 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v2 ; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v4 -; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v2, v4, -v2, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v7 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v7 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v3 ; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v4 -; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v3, v4, -v3, s[4:5] ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_ssubsat_v4i32: @@ -544,30 +528,26 @@ define <4 x i32> @v_ssubsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) { ; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v0, v4 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v0 ; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v4 -; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, -v0, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v1, v5 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v1 ; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v4 -; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v1, v4, -v1, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v2, v6 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v6 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v2 ; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v4 -; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v4, -v2, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v3, v7 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v7 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v3 ; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v4 -; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v4, -v3, s[4:5] ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_ssubsat_v4i32: @@ -599,58 +579,50 @@ define <8 x i32> @v_ssubsat_v8i32(<8 x i32> %lhs, <8 x i32> %rhs) { ; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v0, v8 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v0 ; GFX6-NEXT: 
v_ashrrev_i32_e32 v0, 31, v8 -; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v0, v8, -v0, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v1, v9 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v9 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v1 ; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v8 -; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v1, v8, -v1, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v2, v10 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v10 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v2 ; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v8 -; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v2, v8, -v2, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v3, v11 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v11 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v3 ; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v8 -; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v3, v8, -v3, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v4, v12 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v12 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v4 ; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v8 -; GFX6-NEXT: v_xor_b32_e32 v4, 0x80000000, v4 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v4, v8, -v4, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v5, v13 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v13 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v5 ; GFX6-NEXT: v_ashrrev_i32_e32 v5, 31, v8 -; GFX6-NEXT: v_xor_b32_e32 v5, 0x80000000, v5 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v5, v8, -v5, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v6, v14 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v14 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v6 ; GFX6-NEXT: v_ashrrev_i32_e32 v6, 31, v8 -; GFX6-NEXT: v_xor_b32_e32 v6, 0x80000000, v6 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v6, v8, -v6, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v7, v15 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v15 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v7 ; GFX6-NEXT: v_ashrrev_i32_e32 v7, 31, v8 -; GFX6-NEXT: v_xor_b32_e32 v7, 0x80000000, v7 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v7, v8, -v7, s[4:5] ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_ssubsat_v8i32: @@ -660,58 +632,50 @@ define <8 x i32> @v_ssubsat_v8i32(<8 x i32> %lhs, <8 x i32> %rhs) { ; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v0, v8 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v0 ; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v8 -; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc +; GFX8-NEXT: 
s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v8, -v0, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v1, v9 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v9 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v1 ; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v8 -; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v1, v8, -v1, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v2, v10 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v10 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v2 ; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v8 -; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, -v2, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v3, v11 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v11 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v3 ; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v8 -; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v8, -v3, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v4, v12 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v12 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v4 ; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v8 -; GFX8-NEXT: v_xor_b32_e32 v4, 0x80000000, v4 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v4, v8, -v4, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v5, v13 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v13 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v5 ; GFX8-NEXT: v_ashrrev_i32_e32 v5, 31, v8 -; GFX8-NEXT: v_xor_b32_e32 v5, 0x80000000, v5 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, -v5, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v6, v14 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v14 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v6 ; GFX8-NEXT: v_ashrrev_i32_e32 v6, 31, v8 -; GFX8-NEXT: v_xor_b32_e32 v6, 0x80000000, v6 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v6, v8, -v6, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v7, v15 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v15 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v7 ; GFX8-NEXT: v_ashrrev_i32_e32 v7, 31, v8 -; GFX8-NEXT: v_xor_b32_e32 v7, 0x80000000, v7 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, -v7, s[4:5] ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_ssubsat_v8i32: @@ -751,116 +715,100 @@ define <16 x i32> @v_ssubsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) { ; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v0, v16 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v0 ; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v16 -; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v0, v16, -v0, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v1, v17 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 
0, v17 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v1 ; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v16 -; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v1, v16, v1, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v1, v16, -v1, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v2, v18 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v18 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v2 ; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v16 -; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v2, v16, -v2, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v3, v19 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v19 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v3 ; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v16 -; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v3, v16, v3, vcc -; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v4, v20 -; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20 -; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v4 -; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v16 -; GFX6-NEXT: v_xor_b32_e32 v4, 0x80000000, v4 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v4, v16, v4, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v3, v16, -v3, s[4:5] ; GFX6-NEXT: buffer_load_dword v16, off, s[0:3], s32 +; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v4, v20 +; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20 +; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v4 +; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v17 +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v4, v17, -v4, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v5, v21 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v21 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v5 ; GFX6-NEXT: v_ashrrev_i32_e32 v5, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v5, 0x80000000, v5 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v5, v17, v5, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v5, v17, -v5, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v6, v22 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v22 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v6 ; GFX6-NEXT: v_ashrrev_i32_e32 v6, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v6, 0x80000000, v6 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v6, v17, v6, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v6, v17, -v6, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v7, v23 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v23 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v7 ; GFX6-NEXT: v_ashrrev_i32_e32 v7, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v7, 0x80000000, v7 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v7, v17, v7, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v7, v17, -v7, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v8, v24 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v24 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v8 ; GFX6-NEXT: v_ashrrev_i32_e32 v8, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v8, 0x80000000, v8 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v8, v17, v8, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v8, v17, -v8, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v9, v25 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v25 ; 
GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v9 ; GFX6-NEXT: v_ashrrev_i32_e32 v9, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v9, 0x80000000, v9 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v9, v17, v9, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v9, v17, -v9, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v10, v26 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v26 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v10 ; GFX6-NEXT: v_ashrrev_i32_e32 v10, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v10, 0x80000000, v10 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v10, v17, v10, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v10, v17, -v10, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v11, v27 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v27 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v11 ; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v11, 0x80000000, v11 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v11, v17, v11, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v11, v17, -v11, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v12, v28 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v28 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v12 ; GFX6-NEXT: v_ashrrev_i32_e32 v12, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v12, 0x80000000, v12 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v12, v17, v12, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v12, v17, -v12, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v13, v29 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v29 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v13 ; GFX6-NEXT: v_ashrrev_i32_e32 v13, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v13, 0x80000000, v13 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v13, v17, v13, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v13, v17, -v13, s[4:5] ; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v14, v30 ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v30 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v14 ; GFX6-NEXT: v_ashrrev_i32_e32 v14, 31, v17 -; GFX6-NEXT: v_xor_b32_e32 v14, 0x80000000, v14 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v14, v17, v14, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v14, v17, -v14, s[4:5] ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v16 ; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v15, v16 ; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v15 ; GFX6-NEXT: v_ashrrev_i32_e32 v15, 31, v16 -; GFX6-NEXT: v_xor_b32_e32 v15, 0x80000000, v15 -; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX6-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc +; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_cndmask_b32_e64 v15, v16, -v15, s[4:5] ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_ssubsat_v16i32: @@ -870,116 +818,100 @@ define <16 x i32> @v_ssubsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) { ; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v0, v16 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v0 ; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v16 -; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v16, -v0, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v1, v17 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v17 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, 
v1 ; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v16 -; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v1, v16, v1, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v1, v16, -v1, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v2, v18 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v18 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v2 ; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v16 -; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v16, -v2, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v3, v19 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v19 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v3 ; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v16 -; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v16, v3, vcc -; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v4, v20 -; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20 -; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v4 -; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v16 -; GFX8-NEXT: v_xor_b32_e32 v4, 0x80000000, v4 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v4, v16, v4, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v16, -v3, s[4:5] ; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32 +; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v4, v20 +; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20 +; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v4 +; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v17 +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v4, v17, -v4, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v5, v21 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v21 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v5 ; GFX8-NEXT: v_ashrrev_i32_e32 v5, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v5, 0x80000000, v5 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v5, v17, v5, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v17, -v5, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v6, v22 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v22 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v6 ; GFX8-NEXT: v_ashrrev_i32_e32 v6, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v6, 0x80000000, v6 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v6, v17, v6, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v6, v17, -v6, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v7, v23 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v23 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v7 ; GFX8-NEXT: v_ashrrev_i32_e32 v7, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v7, 0x80000000, v7 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v7, v17, v7, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v7, v17, -v7, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v8, v24 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v24 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v8 ; GFX8-NEXT: v_ashrrev_i32_e32 v8, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v8, 0x80000000, v8 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v8, v17, v8, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v8, v17, -v8, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v9, v25 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v25 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v9 ; 
GFX8-NEXT: v_ashrrev_i32_e32 v9, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v9, 0x80000000, v9 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v9, v17, v9, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v9, v17, -v9, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v10, v26 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v26 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v10 ; GFX8-NEXT: v_ashrrev_i32_e32 v10, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v10, 0x80000000, v10 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v10, v17, v10, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v10, v17, -v10, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v11, v27 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v27 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v11 ; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v11, 0x80000000, v11 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v11, v17, v11, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v11, v17, -v11, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v12, v28 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v28 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v12 ; GFX8-NEXT: v_ashrrev_i32_e32 v12, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v12, 0x80000000, v12 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v12, v17, v12, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v12, v17, -v12, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v13, v29 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v29 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v13 ; GFX8-NEXT: v_ashrrev_i32_e32 v13, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v13, 0x80000000, v13 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v13, v17, v13, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v13, v17, -v13, s[4:5] ; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v14, v30 ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v30 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v14 ; GFX8-NEXT: v_ashrrev_i32_e32 v14, 31, v17 -; GFX8-NEXT: v_xor_b32_e32 v14, 0x80000000, v14 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v14, v17, v14, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v14, v17, -v14, s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v16 ; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v15, v16 ; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v15 ; GFX8-NEXT: v_ashrrev_i32_e32 v15, 31, v16 -; GFX8-NEXT: v_xor_b32_e32 v15, 0x80000000, v15 -; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc +; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v15, v16, -v15, s[4:5] ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_ssubsat_v16i32: @@ -1066,8 +998,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) { ; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc ; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc ; GFX6-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: v_ssubsat_i64: @@ -1080,8 +1011,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) { ; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc ; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX8-NEXT: v_xor_b32_e32 v1, 
0x80000000, v1 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_ssubsat_i64: @@ -1094,8 +1024,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) { ; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc ; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc -; GFX9-NEXT: v_xor_b32_e32 v1, 0x80000000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc ; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_ssubsat_i64: @@ -1104,12 +1033,11 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) { ; GFX10-NEXT: v_sub_co_u32 v4, vcc_lo, v0, v2 ; GFX10-NEXT: v_sub_co_ci_u32_e32 v5, vcc_lo, v1, v3, vcc_lo ; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[2:3] -; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1] -; GFX10-NEXT: v_xor_b32_e32 v1, 0x80000000, v6 +; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: v_ssubsat_i64: @@ -1118,11 +1046,11 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) { ; GFX11-NEXT: v_sub_co_u32 v4, vcc_lo, v0, v2 ; GFX11-NEXT: v_sub_co_ci_u32_e64 v5, null, v1, v3, vcc_lo ; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[2:3] -; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5 ; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1] -; GFX11-NEXT: v_xor_b32_e32 v1, 0x80000000, v6 +; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v5 ; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo -; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1 +; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo +; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo ; GFX11-NEXT: s_setpc_b64 s[30:31] %result = call i64 @llvm.ssub.sat.i64(i64 %lhs, i64 %rhs) ret i64 %result diff --git a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll b/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll deleted file mode 100644 index 726e35d..0000000 --- a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll +++ /dev/null @@ -1,47 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=GCN %s - -declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1), i32) - - -define amdgpu_kernel void @test_isel_single_lane(ptr addrspace(1) %in, ptr addrspace(1) %out) #0 { -; GCN-LABEL: test_isel_single_lane: -; GCN: ; %bb.0: -; GCN-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b32 s4, s[0:1], 0x58 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 -; GCN-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[0:1] offset:16 th:TH_ATOMIC_RETURN -; GCN-NEXT: s_wait_loadcnt 0x0 -; GCN-NEXT: v_readfirstlane_b32 s0, v1 -; GCN-NEXT: s_addk_co_i32 s0, 0xf4 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GCN-NEXT: s_lshl_b32 s1, s0, 4 -; GCN-NEXT: s_mul_i32 s0, s0, s1 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GCN-NEXT: s_lshl_b32 s0, s0, 12 -; GCN-NEXT: s_sub_co_i32 s0, s1, s0 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GCN-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NEXT: 
global_store_b32 v0, v1, s[2:3] -; GCN-NEXT: s_endpgm - %gep0 = getelementptr i32, ptr addrspace(1) %in, i32 22 - %val0 = load i32, ptr addrspace(1) %gep0, align 4 - %gep1 = getelementptr i32, ptr addrspace(1) %in, i32 4 - %val1 = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr addrspace(1) %gep1, i32 %val0) - %res0 = add i32 %val1, 244 - %res1 = shl i32 %res0, 4 - %res2 = mul i32 %res0, %res1 - %res3 = shl i32 %res2, 12 - %res4 = sub i32 %res1, %res3 - store i32 %res4, ptr addrspace(1) %out - ret void -} - - -attributes #0 = { - "amdgpu-flat-work-group-size"="1,1" - "amdgpu-waves-per-eu"="1,1" - "uniform-work-group-size"="true" -} diff --git a/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll b/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll index 76c331c..e2ef60b 100644 --- a/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll +++ b/llvm/test/CodeGen/AMDGPU/wave_dispatch_regs.ll @@ -1,6 +1,9 @@ -; RUN: llc -mtriple=amdgcn--amdpal < %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s +; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal < %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s +; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s +; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s +; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal < %s | FileCheck -check-prefix=GCN -check-prefix=SI -enable-var-scope %s +; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=tonga < %s | FileCheck -check-prefix=GCN -check-prefix=VI -enable-var-scope %s +; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -enable-var-scope %s ; This compute shader has input args that claim that it has 17 sgprs and 5 vgprs ; in wave dispatch. 
Ensure that the sgpr and vgpr counts in COMPUTE_PGM_RSRC1 @@ -17,7 +20,7 @@ ; GCN-NEXT: .scratch_memory_size: 0 ; SI-NEXT: .sgpr_count: 0x11 ; VI-NEXT: .sgpr_count: 0x60 -; GFX9-NEXT: .sgpr_count: 0x11 +; GFX9-NEXT: .sgpr_count: 0x15 ; SI-NEXT: .vgpr_count: 0x5 ; VI-NEXT: .vgpr_count: 0x5 ; GFX9-NEXT: .vgpr_count: 0x5 diff --git a/llvm/test/CodeGen/AMDGPU/wqm.mir b/llvm/test/CodeGen/AMDGPU/wqm.mir index 350b233..ceb1b3e 100644 --- a/llvm/test/CodeGen/AMDGPU/wqm.mir +++ b/llvm/test/CodeGen/AMDGPU/wqm.mir @@ -1,3 +1,4 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -run-pass si-wqm -o - %s | FileCheck %s # RUN: llc -mtriple=amdgcn -mcpu=fiji -passes=si-wqm -o - %s | FileCheck %s @@ -46,10 +47,6 @@ --- # Check for awareness that s_or_saveexec_b64 clobbers SCC -# -#CHECK: ENTER_STRICT_WWM -#CHECK: S_CMP_LT_I32 -#CHECK: S_CSELECT_B32 name: test_strict_wwm_scc alignment: 1 exposesReturnsTwice: false @@ -80,6 +77,21 @@ body: | bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-LABEL: name: test_strict_wwm_scc + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 %3 = COPY $vgpr0 %2 = COPY $sgpr2 %1 = COPY $sgpr1 @@ -96,16 +108,35 @@ body: | --- # Second test for awareness that s_or_saveexec_b64 clobbers SCC # Because entry block is treated differently. 
-# -#CHECK: %bb.1 -#CHECK: S_CMP_LT_I32 -#CHECK: COPY $scc -#CHECK: ENTER_STRICT_WWM -#CHECK: $scc = COPY -#CHECK: S_CSELECT_B32 name: test_strict_wwm_scc2 tracksRegLiveness: true body: | + ; CHECK-LABEL: name: test_strict_wwm_scc2 + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY]], [[DEF]], 0, 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; CHECK-NEXT: [[ENTER_STRICT_WWM1:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: $scc = COPY [[COPY4]] + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc + ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM1]] + ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec + ; CHECK-NEXT: $vgpr1 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1 bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0 @@ -130,7 +161,6 @@ body: | --- # V_SET_INACTIVE, when its second operand is undef, is replaced by a # COPY by si-wqm. Ensure the instruction is removed. 
-#CHECK-NOT: V_SET_INACTIVE name: no_cfg alignment: 1 exposesReturnsTwice: false @@ -167,6 +197,28 @@ body: | bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-LABEL: name: no_cfg + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3 + ; CHECK-NEXT: dead [[COPY4:%[0-9]+]]:sgpr_128 = COPY [[REG_SEQUENCE]] + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1 + ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF + ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY6]], implicit $exec, implicit-def $scc + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY8]], [[COPY7]], 323, 12, 15, 0, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: early-clobber %15:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_dpp]], implicit $exec + ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %15, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0 %3:sgpr_32 = COPY $sgpr3 %2:sgpr_32 = COPY $sgpr2 %1:sgpr_32 = COPY $sgpr1 @@ -189,18 +241,32 @@ body: | --- # Ensure that strict_wwm is not put around an EXEC copy -#CHECK-LABEL: name: copy_exec -#CHECK: %7:sreg_64 = COPY $exec -#CHECK-NEXT: %13:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec -#CHECK-NEXT: %8:vgpr_32 = V_MOV_B32_e32 0, implicit $exec -#CHECK-NEXT: $exec = EXIT_STRICT_WWM %13 -#CHECK-NEXT: %9:vgpr_32 = V_MBCNT_LO_U32_B32_e64 %7.sub0, 0, implicit $exec name: copy_exec tracksRegLiveness: true body: | bb.0: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-LABEL: name: copy_exec + ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3 + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: dead [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = 
V_MBCNT_LO_U32_B32_e64 [[COPY4]].sub0, 0, implicit $exec + ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_MBCNT_LO_U32_B32_e64_]], 312, 15, 15, 0, implicit $exec + ; CHECK-NEXT: dead [[V_READLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READLANE_B32 [[V_MOV_B32_dpp]], 63 + ; CHECK-NEXT: early-clobber %12:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_e32_]], implicit $exec + ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %12, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0 %3:sgpr_32 = COPY $sgpr3 %2:sgpr_32 = COPY $sgpr2 %1:sgpr_32 = COPY $sgpr1 @@ -224,20 +290,48 @@ body: | --- # Check exit of WQM is still inserted correctly when SCC is live until block end. # Critially this tests that compilation does not fail. -#CHECK-LABEL: name: scc_always_live -#CHECK: %8:vreg_128 = IMAGE_SAMPLE_V4_V2 %7 -#CHECK-NEXT: S_CMP_EQ_U32 %2, 0, implicit-def $scc -#CHECK-NEXT: undef %9.sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64 -#CHECK-NEXT: %9.sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32 -#CHECK-NEXT: %14:sreg_32_xm0 = COPY $scc -#CHECK-NEXT: $exec = S_AND_B64 $exec, %13, implicit-def $scc -#CHECK-NEXT: $scc = COPY %14 -#CHECK-NEXT: %10:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64 -#CHECK-NEXT: %11:vreg_128 = IMAGE_SAMPLE_V4_V2 -#CHECK-NEXT: S_CBRANCH_SCC0 %bb.2 name: scc_always_live tracksRegLiveness: true body: | + ; CHECK-LABEL: name: scc_always_live + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec + ; CHECK-NEXT: $m0 = COPY $sgpr1 + ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: [[V_INTERP_P1_F32_:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY1]], 3, 2, implicit $mode, implicit $m0, implicit $exec + ; CHECK-NEXT: [[V_INTERP_P1_F32_1:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY2]], 3, 2, implicit $mode, implicit $m0, implicit $exec + ; CHECK-NEXT: undef [[COPY4:%[0-9]+]].sub0:vreg_64 = COPY [[V_INTERP_P1_F32_]] + ; CHECK-NEXT: [[COPY4:%[0-9]+]].sub1:vreg_64 = COPY [[V_INTERP_P1_F32_1]] + ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY4]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) + ; CHECK-NEXT: S_CMP_EQ_U32 [[COPY3]], 0, implicit-def $scc + ; CHECK-NEXT: undef [[V_ADD_F32_e64_:%[0-9]+]].sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[IMAGE_SAMPLE_V4_V2_]].sub0, 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]].sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32 [[V_INTERP_P1_F32_]], [[V_INTERP_P1_F32_1]], implicit $mode, implicit $exec + ; CHECK-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY $scc + ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc + ; CHECK-NEXT: $scc = COPY [[COPY5]] + ; CHECK-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[V_INTERP_P1_F32_]], 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_1:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_ADD_F32_e64_]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, 
implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) + ; CHECK-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact [[V_ADD_F32_e64_1]], [[DEF1]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec + ; CHECK-NEXT: S_ENDPGM 0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0 + ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1 + ; CHECK-NEXT: $vgpr2 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub0 + ; CHECK-NEXT: $vgpr3 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub1 + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1, $vgpr2, $vgpr3 bb.0: liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2 @@ -281,18 +375,26 @@ body: | --- # Check that unnecessary instruction do not get marked for WWM # -#CHECK-NOT: ENTER_STRICT_WWM -#CHECK: BUFFER_LOAD_DWORDX2 -#CHECK: ENTER_STRICT_WWM -#CHECK: V_SET_INACTIVE_B32 -#CHECK: V_SET_INACTIVE_B32 -#CHECK-NOT: ENTER_STRICT_WWM -#CHECK: V_MAX name: test_wwm_set_inactive_propagation tracksRegLiveness: true body: | bb.0: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; CHECK-LABEL: name: test_wwm_set_inactive_propagation + ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN [[COPY1]], [[COPY]], 0, 0, 0, 0, implicit $exec + ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64_xexec = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF + ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub0:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub0, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc + ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub1:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub1, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc + ; CHECK-NEXT: [[V_MAX_F64_e64_:%[0-9]+]]:vreg_64 = nnan nsz arcp contract reassoc nofpexcept V_MAX_F64_e64 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]] + ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub0, implicit $exec + ; CHECK-NEXT: early-clobber $vgpr1 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub1, implicit $exec + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1 %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3 %1:vgpr_32 = COPY $vgpr0 %2:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN %1:vgpr_32, %0:sgpr_128, 0, 0, 0, 0, implicit $exec @@ -308,15 +410,46 @@ body: | --- # Check that WQM marking occurs correctly through phi nodes in live range graph. # If not then initial V_MOV will not be in WQM. 
-# -#CHECK-LABEL: name: test_wqm_lr_phi -#CHECK: COPY $exec -#CHECK-NEXT: S_WQM -#CHECK-NEXT: V_MOV_B32_e32 -10 -#CHECK-NEXT: V_MOV_B32_e32 0 name: test_wqm_lr_phi tracksRegLiveness: true body: | + ; CHECK-LABEL: name: test_wqm_lr_phi + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec + ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc + ; CHECK-NEXT: undef [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: [[S_GETPC_B64_:%[0-9]+]]:sreg_64 = S_GETPC_B64 + ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[S_GETPC_B64_]], 32, 0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $vcc = V_CMP_LT_U32_e64 4, 4, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: successors: %bb.3(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec + ; CHECK-NEXT: S_BRANCH %bb.3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3: + ; CHECK-NEXT: successors: %bb.4(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec + ; CHECK-NEXT: S_BRANCH %bb.4 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.4: + ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX8_IMM]], [[DEF]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 7) + ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0 + ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1 + ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1 bb.0: undef %0.sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec %0.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec @@ -345,14 +478,20 @@ body: | ... --- -#CHECK-LABEL: name: no_wqm_in_cs -#CHECK-NOT: S_WQM name: no_wqm_in_cs tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_cs + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -362,14 +501,20 @@ body: | ... 
--- -#CHECK-LABEL: name: no_wqm_in_es -#CHECK-NOT: S_WQM name: no_wqm_in_es tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_es + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -379,14 +524,20 @@ body: | ... --- -#CHECK-LABEL: name: no_wqm_in_gs -#CHECK-NOT: S_WQM name: no_wqm_in_gs tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_gs + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -396,14 +547,20 @@ body: | ... --- -#CHECK-LABEL: name: no_wqm_in_hs -#CHECK-NOT: S_WQM name: no_wqm_in_hs tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_hs + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -413,14 +570,20 @@ body: | ... --- -#CHECK-LABEL: name: no_wqm_in_ls -#CHECK-NOT: S_WQM name: no_wqm_in_ls tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_ls + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF @@ -430,14 +593,20 @@ body: | ... 
--- -#CHECK-LABEL: name: no_wqm_in_vs -#CHECK-NOT: S_WQM name: no_wqm_in_vs tracksRegLiveness: true body: | bb.0: liveins: $vgpr1, $vgpr2 + ; CHECK-LABEL: name: no_wqm_in_vs + ; CHECK: liveins: $vgpr1, $vgpr2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF + ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4) undef %0.sub0:vreg_64 = COPY $vgpr1 %0.sub1:vreg_64 = COPY $vgpr2 %100:sgpr_256 = IMPLICIT_DEF diff --git a/llvm/test/CodeGen/ARM/bad-constraint.ll b/llvm/test/CodeGen/ARM/bad-constraint.ll index 9b8fcd5..7d80f0c 100644 --- a/llvm/test/CodeGen/ARM/bad-constraint.ll +++ b/llvm/test/CodeGen/ARM/bad-constraint.ll @@ -1,6 +1,7 @@ ; RUN: not llc -filetype=obj %s -o /dev/null 2>&1 | FileCheck %s ; CHECK: error: couldn't allocate input reg for constraint '{d2}' ; CHECK-NEXT: error: couldn't allocate input reg for constraint '{s2}' +; CHECK-NEXT: error: couldn't allocate input reg for constraint '{d3}' target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" target triple = "armv8a-unknown-linux-gnueabihf" @@ -23,3 +24,8 @@ entry: ret void } +define void @_Z1dv() local_unnamed_addr { +entry: + tail call void asm sideeffect "", "{d3}"(<16 x i8> splat (i8 -1)) + ret void +} diff --git a/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll new file mode 100644 index 0000000..0c01bb9 --- /dev/null +++ b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll @@ -0,0 +1,14 @@ +; RUN: llc %s -filetype=asm -o - | FileCheck %s + +; CHECK: vmov.i8 d3, #0xff + +target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" +target triple = "armv8a-unknown-linux-gnueabihf" + +; Function Attrs: mustprogress noimplicitfloat nounwind +define void @cvt_vec() local_unnamed_addr { +entry: + tail call void asm sideeffect "", "{d3}"(<8 x i8> splat (i8 -1)) + ret void +} + diff --git a/llvm/test/CodeGen/ARM/scmp.ll b/llvm/test/CodeGen/ARM/scmp.ll index 6e493c9..9189aee 100644 --- a/llvm/test/CodeGen/ARM/scmp.ll +++ b/llvm/test/CodeGen/ARM/scmp.ll @@ -4,12 +4,9 @@ define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind { ; CHECK-LABEL: scmp_8_8: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r0, #1 -; CHECK-NEXT: movwgt r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.scmp(i8 %x, i8 %y) ret i8 %1 @@ -18,12 +15,9 @@ define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind { define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind { ; CHECK-LABEL: scmp_8_16: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r0, #1 -; CHECK-NEXT: movwgt r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.scmp(i16 %x, i16 %y) ret i8 %1 @@ -32,12 +26,9 @@ define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind { define i8 @scmp_8_32(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: scmp_8_32: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; 
CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r0, #1 -; CHECK-NEXT: movwgt r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.scmp(i32 %x, i32 %y) ret i8 %1 @@ -92,17 +83,26 @@ define i8 @scmp_8_128(i128 %x, i128 %y) nounwind { define i32 @scmp_32_32(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: scmp_32_32: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r0, #1 -; CHECK-NEXT: movwgt r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 ; CHECK-NEXT: bx lr %1 = call i32 @llvm.scmp(i32 %x, i32 %y) ret i32 %1 } +define i32 @scmp_neg(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: scmp_neg: +; CHECK: @ %bb.0: +; CHECK-NEXT: adds r0, r0, r1 +; CHECK-NEXT: movwgt r0, #1 +; CHECK-NEXT: mvnlt r0, #0 +; CHECK-NEXT: bx lr + %yy = sub nsw i32 0, %y + %1 = call i32 @llvm.scmp(i32 %x, i32 %yy) + ret i32 %1 +} + define i32 @scmp_32_64(i64 %x, i64 %y) nounwind { ; CHECK-LABEL: scmp_32_64: ; CHECK: @ %bb.0: diff --git a/llvm/test/CodeGen/ARM/ucmp.ll b/llvm/test/CodeGen/ARM/ucmp.ll index ad4af53..bb02014 100644 --- a/llvm/test/CodeGen/ARM/ucmp.ll +++ b/llvm/test/CodeGen/ARM/ucmp.ll @@ -4,12 +4,9 @@ define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind { ; CHECK-LABEL: ucmp_8_8: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlo r0, #1 -; CHECK-NEXT: movwhi r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: mvnlo r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.ucmp(i8 %x, i8 %y) ret i8 %1 @@ -18,12 +15,9 @@ define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind { define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind { ; CHECK-LABEL: ucmp_8_16: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlo r0, #1 -; CHECK-NEXT: movwhi r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: mvnlo r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.ucmp(i16 %x, i16 %y) ret i8 %1 @@ -32,12 +26,9 @@ define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind { define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: ucmp_8_32: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlo r0, #1 -; CHECK-NEXT: movwhi r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: mvnlo r0, #0 ; CHECK-NEXT: bx lr %1 = call i8 @llvm.ucmp(i32 %x, i32 %y) ret i8 %1 @@ -92,12 +83,9 @@ define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind { define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: ucmp_32_32: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov r0, #0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlo r0, #1 -; CHECK-NEXT: movwhi r2, #1 -; CHECK-NEXT: sub r0, r2, r0 +; CHECK-NEXT: subs r0, r0, r1 +; CHECK-NEXT: movwhi r0, #1 +; CHECK-NEXT: mvnlo r0, #0 ; CHECK-NEXT: bx lr %1 = call i32 @llvm.ucmp(i32 %x, i32 %y) ret i32 %1 diff --git a/llvm/test/CodeGen/AVR/bug-143247.ll b/llvm/test/CodeGen/AVR/bug-143247.ll index 07c4c65..d449327 100644 --- a/llvm/test/CodeGen/AVR/bug-143247.ll +++ b/llvm/test/CodeGen/AVR/bug-143247.ll @@ -8,18 +8,18 @@ define void @complex_sbi() { ; CHECK: 
; %bb.0: ; %entry ; CHECK-NEXT: push r16 ; CHECK-NEXT: push r17 -; CHECK-NEXT: ldi r24, 0 +; CHECK-NEXT: ldi r24, 1 ; CHECK-NEXT: ldi r25, 0 ; CHECK-NEXT: .LBB0_1: ; %while.cond ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sbi 1, 7 -; CHECK-NEXT: adiw r24, 1 ; CHECK-NEXT: movw r16, r24 ; CHECK-NEXT: andi r24, 15 ; CHECK-NEXT: andi r25, 0 ; CHECK-NEXT: adiw r24, 1 ; CHECK-NEXT: call nil ; CHECK-NEXT: movw r24, r16 +; CHECK-NEXT: adiw r24, 1 ; CHECK-NEXT: rjmp .LBB0_1 entry: br label %while.cond diff --git a/llvm/test/CodeGen/AVR/cmp.ll b/llvm/test/CodeGen/AVR/cmp.ll index efc9b8d..c932bda1 100644 --- a/llvm/test/CodeGen/AVR/cmp.ll +++ b/llvm/test/CodeGen/AVR/cmp.ll @@ -298,3 +298,18 @@ define i16 @cmp_i16_gt_1023(i16 %0) { %3 = zext i1 %2 to i16 ret i16 %3 } + +define void @cmp_issue152097(i16 %a) addrspace(1) { +; See: https://github.com/llvm/llvm-project/issues/152097 +; CHECK-LABEL: cmp_issue152097 +; CHECK: ldi r18, -1 +; CHECK-NEXT: cpi r24, -2 +; CHECK-NEXT: cpc r25, r18 +; CHECK-NEXT: ret + %cmp = icmp ugt i16 -2, %a + br i1 %cmp, label %if.then, label %if.else +if.then: + ret void +if.else: + ret void +} diff --git a/llvm/test/CodeGen/AVR/half.ll b/llvm/test/CodeGen/AVR/half.ll new file mode 100644 index 0000000..c922293 --- /dev/null +++ b/llvm/test/CodeGen/AVR/half.ll @@ -0,0 +1,534 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc %s -o - -mtriple=avr | FileCheck %s + +; Tests for various operations on half precison float. Much of the test is +; copied from test/CodeGen/X86/half.ll. + +define void @store(half %x, ptr %p) nounwind { +; CHECK-LABEL: store: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r22 +; CHECK-NEXT: mov r31, r23 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: ret + store half %x, ptr %p + ret void +} + +define half @return(ptr %p) nounwind { +; CHECK-LABEL: return: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: ret + %r = load half, ptr %p + ret half %r +} + +define dso_local double @loadd(ptr nocapture readonly %a) local_unnamed_addr nounwind { +; CHECK-LABEL: loadd: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ldd r24, Z+2 +; CHECK-NEXT: ldd r25, Z+3 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: rcall __extendsfdf2 +; CHECK-NEXT: ret +entry: + %arrayidx = getelementptr inbounds i16, ptr %a, i64 1 + %0 = load i16, ptr %arrayidx, align 2 + %1 = tail call double @llvm.convert.from.fp16.f64(i16 %0) + ret double %1 +} + +define dso_local float @loadf(ptr nocapture readonly %a) local_unnamed_addr nounwind { +; CHECK-LABEL: loadf: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ldd r24, Z+2 +; CHECK-NEXT: ldd r25, Z+3 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: ret +entry: + %arrayidx = getelementptr inbounds i16, ptr %a, i64 1 + %0 = load i16, ptr %arrayidx, align 2 + %1 = tail call float @llvm.convert.from.fp16.f32(i16 %0) + ret float %1 +} + +define dso_local void @stored(ptr nocapture %a, double %b) local_unnamed_addr nounwind { +; CHECK-LABEL: stored: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: mov r30, r22 +; CHECK-NEXT: mov r31, r23 +; CHECK-NEXT: mov r22, r20 +; CHECK-NEXT: mov r23, r21 +; CHECK-NEXT: mov r20, r18 +; CHECK-NEXT: mov r21, r19 +; CHECK-NEXT: mov r18, r16 +; CHECK-NEXT: mov r19, r17 +; CHECK-NEXT: mov r16, r24 +; 
CHECK-NEXT: mov r17, r25 +; CHECK-NEXT: mov r24, r30 +; CHECK-NEXT: mov r25, r31 +; CHECK-NEXT: rcall __truncdfhf2 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: ret +entry: + %0 = tail call i16 @llvm.convert.to.fp16.f64(double %b) + store i16 %0, ptr %a, align 2 + ret void +} + +define dso_local void @storef(ptr nocapture %a, float %b) local_unnamed_addr nounwind { +; CHECK-LABEL: storef: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: mov r18, r22 +; CHECK-NEXT: mov r19, r23 +; CHECK-NEXT: mov r16, r24 +; CHECK-NEXT: mov r17, r25 +; CHECK-NEXT: mov r22, r20 +; CHECK-NEXT: mov r23, r21 +; CHECK-NEXT: mov r24, r18 +; CHECK-NEXT: mov r25, r19 +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: ret +entry: + %0 = tail call i16 @llvm.convert.to.fp16.f32(float %b) + store i16 %0, ptr %a, align 2 + ret void +} + +define void @test_load_store(ptr %in, ptr %out) nounwind { +; CHECK-LABEL: test_load_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: mov r30, r22 +; CHECK-NEXT: mov r31, r23 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: ret + %val = load half, ptr %in + store half %val, ptr %out + ret void +} + +define i16 @test_bitcast_from_half(ptr %addr) nounwind { +; CHECK-LABEL: test_bitcast_from_half: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: ret + %val = load half, ptr %addr + %val_int = bitcast half %val to i16 + ret i16 %val_int +} + +define void @test_bitcast_to_half(ptr %addr, i16 %in) nounwind { +; CHECK-LABEL: test_bitcast_to_half: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: std Z+1, r23 +; CHECK-NEXT: st Z, r22 +; CHECK-NEXT: ret + %val_fp = bitcast i16 %in to half + store half %val_fp, ptr %addr + ret void +} + +define half @from_bits(i16 %x) nounwind { +; CHECK-LABEL: from_bits: +; CHECK: ; %bb.0: +; CHECK-NEXT: ret + %res = bitcast i16 %x to half + ret half %res +} + +define i16 @to_bits(half %x) nounwind { +; CHECK-LABEL: to_bits: +; CHECK: ; %bb.0: +; CHECK-NEXT: ret + %res = bitcast half %x to i16 + ret i16 %res +} + +define float @test_extend32(ptr %addr) nounwind { +; CHECK-LABEL: test_extend32: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: ret + %val16 = load half, ptr %addr + %val32 = fpext half %val16 to float + ret float %val32 +} + +define double @test_extend64(ptr %addr) nounwind { +; CHECK-LABEL: test_extend64: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: rcall __extendsfdf2 +; CHECK-NEXT: ret + %val16 = load half, ptr %addr + %val32 = fpext half %val16 to double + ret double %val32 +} + +define void @test_trunc32(float %in, ptr %addr) nounwind { +; CHECK-LABEL: test_trunc32: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: mov r16, r20 +; CHECK-NEXT: mov r17, r21 +; CHECK-NEXT: rcall __truncsfhf2 +; 
CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: ret + %val16 = fptrunc float %in to half + store half %val16, ptr %addr + ret void +} + +define void @test_trunc64(double %in, ptr %addr) nounwind { +; CHECK-LABEL: test_trunc64: +; CHECK: ; %bb.0: +; CHECK-NEXT: rcall __truncdfhf2 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: ret + %val16 = fptrunc double %in to half + store half %val16, ptr %addr + ret void +} + +define i64 @test_fptosi_i64(ptr %p) nounwind { +; CHECK-LABEL: test_fptosi_i64: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: rcall __fixsfdi +; CHECK-NEXT: ret + %a = load half, ptr %p, align 2 + %r = fptosi half %a to i64 + ret i64 %r +} + +define void @test_sitofp_i64(i64 %a, ptr %p) nounwind { +; CHECK-LABEL: test_sitofp_i64: +; CHECK: ; %bb.0: +; CHECK-NEXT: rcall __floatdisf +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: ret + %r = sitofp i64 %a to half + store half %r, ptr %p + ret void +} + +define i64 @test_fptoui_i64(ptr %p) nounwind { +; CHECK-LABEL: test_fptoui_i64: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: rcall __fixunssfdi +; CHECK-NEXT: ret + %a = load half, ptr %p, align 2 + %r = fptoui half %a to i64 + ret i64 %r +} + +define void @test_uitofp_i64(i64 %a, ptr %p) nounwind { +; CHECK-LABEL: test_uitofp_i64: +; CHECK: ; %bb.0: +; CHECK-NEXT: rcall __floatundisf +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: ret + %r = uitofp i64 %a to half + store half %r, ptr %p + ret void +} + +define <2 x float> @test_extend32_vec2(ptr %p) nounwind { +; CHECK-LABEL: test_extend32_vec2: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r12 +; CHECK-NEXT: push r13 +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: mov r12, r30 +; CHECK-NEXT: mov r13, r31 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: mov r16, r22 +; CHECK-NEXT: mov r17, r23 +; CHECK-NEXT: mov r14, r24 +; CHECK-NEXT: mov r15, r25 +; CHECK-NEXT: mov r30, r12 +; CHECK-NEXT: mov r31, r13 +; CHECK-NEXT: ldd r24, Z+2 +; CHECK-NEXT: ldd r25, Z+3 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: mov r18, r16 +; CHECK-NEXT: mov r19, r17 +; CHECK-NEXT: mov r20, r14 +; CHECK-NEXT: mov r21, r15 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: pop r13 +; CHECK-NEXT: pop r12 +; CHECK-NEXT: ret + %a = load <2 x half>, ptr %p, align 8 + %b = fpext <2 x half> %a to <2 x float> + ret <2 x float> %b +} + +define <1 x double> @test_extend64_vec1(ptr %p) nounwind { +; CHECK-LABEL: test_extend64_vec1: +; CHECK: ; %bb.0: +; CHECK-NEXT: mov r30, r24 +; CHECK-NEXT: mov r31, r25 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: rcall __extendsfdf2 +; CHECK-NEXT: ret + %a = load <1 x half>, ptr %p, 
align 8 + %b = fpext <1 x half> %a to <1 x double> + ret <1 x double> %b +} + +define void @test_trunc32_vec2(<2 x float> %a, ptr %p) nounwind { +; CHECK-LABEL: test_trunc32_vec2: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r12 +; CHECK-NEXT: push r13 +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: mov r14, r20 +; CHECK-NEXT: mov r15, r21 +; CHECK-NEXT: mov r12, r18 +; CHECK-NEXT: mov r13, r19 +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+3, r25 +; CHECK-NEXT: std Z+2, r24 +; CHECK-NEXT: mov r22, r12 +; CHECK-NEXT: mov r23, r13 +; CHECK-NEXT: mov r24, r14 +; CHECK-NEXT: mov r25, r15 +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: pop r13 +; CHECK-NEXT: pop r12 +; CHECK-NEXT: ret + %v = fptrunc <2 x float> %a to <2 x half> + store <2 x half> %v, ptr %p + ret void +} + +define void @test_trunc64_vec1(<1 x double> %a, ptr %p) nounwind { +; CHECK-LABEL: test_trunc64_vec1: +; CHECK: ; %bb.0: +; CHECK-NEXT: rcall __truncdfhf2 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: std Z+1, r25 +; CHECK-NEXT: st Z, r24 +; CHECK-NEXT: ret + %v = fptrunc <1 x double> %a to <1 x half> + store <1 x half> %v, ptr %p + ret void +} + +define float @test_sitofp_fadd_i32(i32 %a, ptr %b) nounwind { +; CHECK-LABEL: test_sitofp_fadd_i32: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r12 +; CHECK-NEXT: push r13 +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: mov r16, r20 +; CHECK-NEXT: mov r17, r21 +; CHECK-NEXT: rcall __floatsisf +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r14, r24 +; CHECK-NEXT: mov r15, r25 +; CHECK-NEXT: mov r30, r16 +; CHECK-NEXT: mov r31, r17 +; CHECK-NEXT: ld r24, Z +; CHECK-NEXT: ldd r25, Z+1 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: mov r16, r22 +; CHECK-NEXT: mov r17, r23 +; CHECK-NEXT: mov r12, r24 +; CHECK-NEXT: mov r13, r25 +; CHECK-NEXT: mov r24, r14 +; CHECK-NEXT: mov r25, r15 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: mov r18, r22 +; CHECK-NEXT: mov r19, r23 +; CHECK-NEXT: mov r20, r24 +; CHECK-NEXT: mov r21, r25 +; CHECK-NEXT: mov r22, r16 +; CHECK-NEXT: mov r23, r17 +; CHECK-NEXT: mov r24, r12 +; CHECK-NEXT: mov r25, r13 +; CHECK-NEXT: rcall __addsf3 +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: pop r13 +; CHECK-NEXT: pop r12 +; CHECK-NEXT: ret + %tmp0 = load half, ptr %b + %tmp1 = sitofp i32 %a to half + %tmp2 = fadd half %tmp0, %tmp1 + %tmp3 = fpext half %tmp2 to float + ret float %tmp3 +} + +define half @chained_fp_ops(half %x) { +; CHECK-LABEL: chained_fp_ops: +; CHECK: ; %bb.0: ; %start +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: mov r18, r22 +; CHECK-NEXT: mov r19, r23 +; CHECK-NEXT: mov r20, r24 +; CHECK-NEXT: mov r21, r25 +; CHECK-NEXT: rcall __addsf3 +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: ldi r18, 0 +; CHECK-NEXT: ldi r19, 0 +; CHECK-NEXT: ldi r20, 0 +; CHECK-NEXT: ldi r21, 63 +; CHECK-NEXT: rcall __mulsf3 +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: ret +start: + %y = fmul half %x, 0xH4000 + %z = fdiv half %y, 0xH4000 + ret half %z +} + +define half @test_select_cc(half) nounwind { +; CHECK-LABEL: test_select_cc: +; CHECK: ; %bb.0: +; CHECK-NEXT: push r16 +; 
CHECK-NEXT: push r17 +; CHECK-NEXT: rcall __extendhfsf2 +; CHECK-NEXT: ldi r16, 0 +; CHECK-NEXT: ldi r17, 0 +; CHECK-NEXT: mov r18, r16 +; CHECK-NEXT: mov r19, r17 +; CHECK-NEXT: mov r20, r16 +; CHECK-NEXT: mov r21, r17 +; CHECK-NEXT: rcall __nesf2 +; CHECK-NEXT: cpi r24, 0 +; CHECK-NEXT: breq .LBB25_2 +; CHECK-NEXT: ; %bb.1: +; CHECK-NEXT: ldi r16, 0 +; CHECK-NEXT: ldi r17, 60 +; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: mov r24, r16 +; CHECK-NEXT: mov r25, r17 +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: ret + %2 = fcmp une half %0, 0xH0000 + %3 = uitofp i1 %2 to half + ret half %3 +} + +define half @fabs(half %x) nounwind { +; CHECK-LABEL: fabs: +; CHECK: ; %bb.0: +; CHECK-NEXT: andi r25, 127 +; CHECK-NEXT: ret + %a = call half @llvm.fabs.f16(half %x) + ret half %a +} + +define half @fcopysign(half %x, half %y) nounwind { +; CHECK-LABEL: fcopysign: +; CHECK: ; %bb.0: +; CHECK-NEXT: andi r22, 0 +; CHECK-NEXT: andi r23, 128 +; CHECK-NEXT: andi r25, 127 +; CHECK-NEXT: or r24, r22 +; CHECK-NEXT: or r25, r23 +; CHECK-NEXT: ret + %a = call half @llvm.copysign.f16(half %x, half %y) + ret half %a +} diff --git a/llvm/test/CodeGen/AVR/issue-151080.ll b/llvm/test/CodeGen/AVR/issue-151080.ll new file mode 100644 index 0000000..9829224 --- /dev/null +++ b/llvm/test/CodeGen/AVR/issue-151080.ll @@ -0,0 +1,95 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -O=3 -mtriple=avr-none -mcpu=attiny85 -verify-machineinstrs | FileCheck %s + +declare dso_local void @foo(i16 noundef) addrspace(1) +@ci = dso_local global [30 x i16] zeroinitializer, align 1 +define void @loopreduce() { +; CHECK-LABEL: loopreduce: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: push r16 +; CHECK-NEXT: push r17 +; CHECK-NEXT: ldi r26, lo8(ci) +; CHECK-NEXT: ldi r27, hi8(ci) +; CHECK-NEXT: ldi r16, lo8(ci+60) +; CHECK-NEXT: ldi r17, hi8(ci+60) +; CHECK-NEXT: .LBB0_1: ; %for.body +; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: ld r24, X+ +; CHECK-NEXT: ld r25, X+ +; CHECK-NEXT: movw r14, r26 +; CHECK-NEXT: rcall foo +; CHECK-NEXT: movw r26, r14 +; CHECK-NEXT: cp r26, r16 +; CHECK-NEXT: cpc r27, r17 +; CHECK-NEXT: brne .LBB0_1 +; CHECK-NEXT: ; %bb.2: ; %for.cond.cleanup +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r16 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: ret +entry: + br label %for.body +for.body: ; preds = %entry, %for.body + %i.03 = phi i16 [ 0, %entry ], [ %inc, %for.body ] + %arrayidx = getelementptr inbounds nuw [30 x i16], ptr @ci, i16 0, i16 %i.03 + %0 = load i16, ptr %arrayidx, align 1 + tail call addrspace(1) void @foo(i16 noundef %0) + %inc = add nuw nsw i16 %i.03, 1 + %exitcond.not = icmp eq i16 %inc, 30 + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +; Exit blocks +for.cond.cleanup: ; preds = %for.body + ret void +} + +define void @indvar() { +; CHECK-LABEL: indvar: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: push r12 +; CHECK-NEXT: push r13 +; CHECK-NEXT: push r14 +; CHECK-NEXT: push r15 +; CHECK-NEXT: push r17 +; CHECK-NEXT: ldi r24, 8 +; CHECK-NEXT: ldi r25, 0 +; CHECK-NEXT: movw r14, r24 +; CHECK-NEXT: ldi r24, 1 +; CHECK-NEXT: ldi r25, 0 +; CHECK-NEXT: movw r12, r24 +; CHECK-NEXT: ldi r17, 3 +; CHECK-NEXT: .LBB1_1: ; %for.body +; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: movw r24, r12 +; CHECK-NEXT: rcall foo +; CHECK-NEXT: movw r22, r14 +; CHECK-NEXT: movw r24, r22 +; CHECK-NEXT: rcall __mulhi3 +; CHECK-NEXT: movw r30, r14 +; CHECK-NEXT: 
adiw r30, 1 +; CHECK-NEXT: movw r14, r30 +; CHECK-NEXT: cpi r24, -24 +; CHECK-NEXT: cpc r25, r17 +; CHECK-NEXT: brlo .LBB1_1 +; CHECK-NEXT: ; %bb.2: ; %for.cond.cleanup +; CHECK-NEXT: pop r17 +; CHECK-NEXT: pop r15 +; CHECK-NEXT: pop r14 +; CHECK-NEXT: pop r13 +; CHECK-NEXT: pop r12 +; CHECK-NEXT: ret +entry: + br label %for.body +for.body: ; preds = %entry, %for.body + %i.03 = phi i16 [ 7, %entry ], [ %inc, %for.body ] + tail call addrspace(1) void @foo(i16 noundef 1) + %inc = add nuw nsw i16 %i.03, 1 + %mul = mul nuw nsw i16 %inc, %inc + %cmp = icmp samesign ult i16 %mul, 1000 + br i1 %cmp, label %for.body, label %for.cond.cleanup +for.cond.cleanup: ; preds = %for.body + ret void +} + diff --git a/llvm/test/CodeGen/AVR/llvm.sincos.ll b/llvm/test/CodeGen/AVR/llvm.sincos.ll index b70b8d3..ff01da9 100644 --- a/llvm/test/CodeGen/AVR/llvm.sincos.ll +++ b/llvm/test/CodeGen/AVR/llvm.sincos.ll @@ -11,8 +11,6 @@ define { half, half } @test_sincos_f16(half %a) #0 { ; CHECK-NEXT: push r15 ; CHECK-NEXT: push r16 ; CHECK-NEXT: push r17 -; CHECK-NEXT: mov r24, r22 -; CHECK-NEXT: mov r25, r23 ; CHECK-NEXT: rcall __extendhfsf2 ; CHECK-NEXT: mov r16, r22 ; CHECK-NEXT: mov r17, r23 @@ -28,10 +26,8 @@ define { half, half } @test_sincos_f16(half %a) #0 { ; CHECK-NEXT: mov r25, r15 ; CHECK-NEXT: rcall cos ; CHECK-NEXT: rcall __truncsfhf2 -; CHECK-NEXT: mov r22, r24 -; CHECK-NEXT: mov r23, r25 -; CHECK-NEXT: mov r18, r12 -; CHECK-NEXT: mov r19, r13 +; CHECK-NEXT: mov r22, r12 +; CHECK-NEXT: mov r23, r13 ; CHECK-NEXT: pop r17 ; CHECK-NEXT: pop r16 ; CHECK-NEXT: pop r15 @@ -46,13 +42,9 @@ define { half, half } @test_sincos_f16(half %a) #0 { define half @test_sincos_f16_only_use_sin(half %a) #0 { ; CHECK-LABEL: test_sincos_f16_only_use_sin: ; CHECK: ; %bb.0: -; CHECK-NEXT: mov r24, r22 -; CHECK-NEXT: mov r25, r23 ; CHECK-NEXT: rcall __extendhfsf2 ; CHECK-NEXT: rcall sin ; CHECK-NEXT: rcall __truncsfhf2 -; CHECK-NEXT: mov r22, r24 -; CHECK-NEXT: mov r23, r25 ; CHECK-NEXT: ret %result = call { half, half } @llvm.sincos.f16(half %a) %result.0 = extractvalue { half, half } %result, 0 @@ -62,13 +54,9 @@ define half @test_sincos_f16_only_use_sin(half %a) #0 { define half @test_sincos_f16_only_use_cos(half %a) #0 { ; CHECK-LABEL: test_sincos_f16_only_use_cos: ; CHECK: ; %bb.0: -; CHECK-NEXT: mov r24, r22 -; CHECK-NEXT: mov r25, r23 ; CHECK-NEXT: rcall __extendhfsf2 ; CHECK-NEXT: rcall cos ; CHECK-NEXT: rcall __truncsfhf2 -; CHECK-NEXT: mov r22, r24 -; CHECK-NEXT: mov r23, r25 ; CHECK-NEXT: ret %result = call { half, half } @llvm.sincos.f16(half %a) %result.1 = extractvalue { half, half } %result, 1 @@ -90,48 +78,50 @@ define { <2 x half>, <2 x half> } @test_sincos_v2f16(<2 x half> %a) #0 { ; CHECK-NEXT: push r15 ; CHECK-NEXT: push r16 ; CHECK-NEXT: push r17 -; CHECK-NEXT: mov r10, r22 -; CHECK-NEXT: mov r11, r23 +; CHECK-NEXT: mov r16, r24 +; CHECK-NEXT: mov r17, r25 +; CHECK-NEXT: mov r24, r22 +; CHECK-NEXT: mov r25, r23 ; CHECK-NEXT: rcall __extendhfsf2 -; CHECK-NEXT: mov r16, r22 -; CHECK-NEXT: mov r17, r23 -; CHECK-NEXT: mov r14, r24 -; CHECK-NEXT: mov r15, r25 -; CHECK-NEXT: rcall sin -; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r14, r22 +; CHECK-NEXT: mov r15, r23 ; CHECK-NEXT: mov r12, r24 ; CHECK-NEXT: mov r13, r25 -; CHECK-NEXT: mov r24, r10 -; CHECK-NEXT: mov r25, r11 +; CHECK-NEXT: rcall sin +; CHECK-NEXT: rcall __truncsfhf2 +; CHECK-NEXT: mov r10, r24 +; CHECK-NEXT: mov r11, r25 +; CHECK-NEXT: mov r24, r16 +; CHECK-NEXT: mov r25, r17 ; CHECK-NEXT: rcall __extendhfsf2 -; CHECK-NEXT: mov r10, r22 -; 
CHECK-NEXT: mov r11, r23 +; CHECK-NEXT: mov r16, r22 +; CHECK-NEXT: mov r17, r23 ; CHECK-NEXT: mov r8, r24 ; CHECK-NEXT: mov r9, r25 -; CHECK-NEXT: rcall cos +; CHECK-NEXT: rcall sin ; CHECK-NEXT: rcall __truncsfhf2 ; CHECK-NEXT: mov r6, r24 ; CHECK-NEXT: mov r7, r25 -; CHECK-NEXT: mov r22, r10 -; CHECK-NEXT: mov r23, r11 -; CHECK-NEXT: mov r24, r8 -; CHECK-NEXT: mov r25, r9 -; CHECK-NEXT: rcall sin +; CHECK-NEXT: mov r22, r14 +; CHECK-NEXT: mov r23, r15 +; CHECK-NEXT: mov r24, r12 +; CHECK-NEXT: mov r25, r13 +; CHECK-NEXT: rcall cos ; CHECK-NEXT: rcall __truncsfhf2 -; CHECK-NEXT: mov r10, r24 -; CHECK-NEXT: mov r11, r25 +; CHECK-NEXT: mov r14, r24 +; CHECK-NEXT: mov r15, r25 ; CHECK-NEXT: mov r22, r16 ; CHECK-NEXT: mov r23, r17 -; CHECK-NEXT: mov r24, r14 -; CHECK-NEXT: mov r25, r15 +; CHECK-NEXT: mov r24, r8 +; CHECK-NEXT: mov r25, r9 ; CHECK-NEXT: rcall cos ; CHECK-NEXT: rcall __truncsfhf2 ; CHECK-NEXT: mov r18, r10 ; CHECK-NEXT: mov r19, r11 -; CHECK-NEXT: mov r20, r12 -; CHECK-NEXT: mov r21, r13 -; CHECK-NEXT: mov r22, r6 -; CHECK-NEXT: mov r23, r7 +; CHECK-NEXT: mov r20, r6 +; CHECK-NEXT: mov r21, r7 +; CHECK-NEXT: mov r22, r14 +; CHECK-NEXT: mov r23, r15 ; CHECK-NEXT: pop r17 ; CHECK-NEXT: pop r16 ; CHECK-NEXT: pop r15 diff --git a/llvm/test/CodeGen/AVR/load.ll b/llvm/test/CodeGen/AVR/load.ll index 5de6b48..6a1e067 100644 --- a/llvm/test/CodeGen/AVR/load.ll +++ b/llvm/test/CodeGen/AVR/load.ll @@ -1,4 +1,4 @@ -; RUN: llc -mattr=avr6,sram < %s -mtriple=avr -verify-machineinstrs | FileCheck %s +; RUN: llc -mattr=avr6,sram < %s -mtriple=avr-none -verify-machineinstrs | FileCheck %s define i8 @load8(ptr %x) { ; CHECK-LABEL: load8: @@ -98,9 +98,33 @@ while.end: ; preds = %while.body, %entry ret i16 %r.0.lcssa } +define i16 @load16postincloopreduce(ptr %p, i16 %cnt) { +; CHECK-LABEL: load16postincloopreduce: +; CHECK: ld {{.*}}, {{[XYZ]}}+ +; CHECK: ld {{.*}}, {{[XYZ]}}+ +entry: + %cmp3 = icmp sgt i16 %cnt, 0 + br i1 %cmp3, label %for.body, label %for.cond.cleanup +for.cond.cleanup: ; preds = %for.body, %entry + %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] + ret i16 %sum.0.lcssa +for.body: ; preds = %entry, %for.body + %i.06 = phi i16 [ %inc, %for.body ], [ 0, %entry ] + %sum.05 = phi i16 [ %add, %for.body ], [ 0, %entry ] + %p.addr.04 = phi ptr [ %incdec.ptr, %for.body ], [ %p, %entry ] + %incdec.ptr = getelementptr inbounds nuw i8, ptr %p.addr.04, i16 2 + %0 = load i16, ptr %p.addr.04, align 1 + %add = add nsw i16 %0, %sum.05 + %inc = add nuw nsw i16 %i.06, 1 + %exitcond.not = icmp eq i16 %inc, %cnt + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + define i8 @load8predec(ptr %x, i8 %y) { ; CHECK-LABEL: load8predec: -; CHECK: ld {{.*}}, -{{[XYZ]}} +; TODO: ld {{.*}}, -{{[XYZ]}} +; CHECK: sbiw r26, 1 +; CHECK: ld {{.*}}, X entry: %tobool6 = icmp eq i8 %y, 0 br i1 %tobool6, label %while.end, label %while.body @@ -121,8 +145,12 @@ while.end: ; preds = %while.body, %entry define i16 @load16predec(ptr %x, i16 %y) { ; CHECK-LABEL: load16predec: -; CHECK: ld {{.*}}, -{{[XYZ]}} -; CHECK: ld {{.*}}, -{{[XYZ]}} +; TODO: ld {{.*}}, -{{[XYZ]}} +; TODO: ld {{.*}}, -{{[XYZ]}} +; CHECK: sbiw r24, 2 +; CHECK: movw r30, r24 +; CHECK: ld {{.*}}, Z +; CHECK: ldd {{.*}}, Z+1 entry: %tobool2 = icmp eq i16 %y, 0 br i1 %tobool2, label %while.end, label %while.body diff --git a/llvm/test/CodeGen/AVR/shift.ll b/llvm/test/CodeGen/AVR/shift.ll index 9836f93..1bd9b45 100644 --- a/llvm/test/CodeGen/AVR/shift.ll +++ b/llvm/test/CodeGen/AVR/shift.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions 
have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=avr -mtriple=avr -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=avr-none -verify-machineinstrs | FileCheck %s ; Optimize for speed. define i8 @shift_i8_i8_speed(i8 %a, i8 %b) { diff --git a/llvm/test/CodeGen/AVR/store.ll b/llvm/test/CodeGen/AVR/store.ll index aab0270..9c41645 100644 --- a/llvm/test/CodeGen/AVR/store.ll +++ b/llvm/test/CodeGen/AVR/store.ll @@ -94,7 +94,9 @@ while.end: ; preds = %while.body, %entry define void @store8predec(ptr %x, i8 %y) { ; CHECK-LABEL: store8predec: -; CHECK: st -{{[XYZ]}}, {{.*}} +; TODO: st -{{[XYZ]}}, {{.*}} +; CHECK: sbiw r26, 1 +; CHECK: st X, {{.*}} entry: %tobool3 = icmp eq i8 %y, 0 br i1 %tobool3, label %while.end, label %while.body @@ -112,8 +114,12 @@ while.end: ; preds = %while.body, %entry define void @store16predec(ptr %x, i16 %y) { ; CHECK-LABEL: store16predec: -; CHECK: st -{{[XYZ]}}, {{.*}} -; CHECK: st -{{[XYZ]}}, {{.*}} +; TODO: st -{{[XYZ]}}, {{.*}} +; TODO: st -{{[XYZ]}}, {{.*}} +; CHECK: sbiw r24, 2 +; CHECK: movw r30, r24 +; CHECK: std Z+1, {{.*}} +; CHECK: st Z, {{.*}} entry: %tobool3 = icmp eq i16 %y, 0 br i1 %tobool3, label %while.end, label %while.body diff --git a/llvm/test/CodeGen/BPF/loop-exit-cond.ll b/llvm/test/CodeGen/BPF/loop-exit-cond.ll index 69fe714..fa6a4a0 100644 --- a/llvm/test/CodeGen/BPF/loop-exit-cond.ll +++ b/llvm/test/CodeGen/BPF/loop-exit-cond.ll @@ -35,14 +35,14 @@ define dso_local i32 @test(i32 %len, ptr %data) #0 { ; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_BODY:%.*]], label [[IF_END:%.*]] ; CHECK: for.body: ; CHECK-NEXT: [[I_05:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 1, [[ENTRY:%.*]] ] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[D]]) #[[ATTR3:[0-9]+]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[D]]) #[[ATTR3:[0-9]+]] ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[DATA]], align 1, !tbaa [[TBAA3:![0-9]+]] ; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: [[NARROW:%.*]] = select i1 [[TOBOOL_NOT]], i8 48, i8 [[TMP1]] ; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[NARROW]] to i64 ; CHECK-NEXT: store i64 [[CONV2]], ptr [[D]], align 8, !tbaa [[TBAA6:![0-9]+]] ; CHECK-NEXT: call void @foo(ptr nonnull @.str, i32 [[I_05]], ptr nonnull [[D]]) #[[ATTR3]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[D]]) #[[ATTR3]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[D]]) #[[ATTR3]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_05]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] @@ -61,7 +61,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3 + call void @llvm.lifetime.start.p0(ptr %i) #3 store i32 1, ptr %i, align 4, !tbaa !3 br label %for.cond @@ -73,11 +73,11 @@ for.cond: ; preds = %for.inc, %if.then for.cond.cleanup: ; preds = %for.cond - call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3 + call void @llvm.lifetime.end.p0(ptr %i) #3 br label %for.end for.body: ; preds = %for.cond - call void @llvm.lifetime.start.p0(i64 8, ptr %d) #3 + call void @llvm.lifetime.start.p0(ptr %d) #3 %3 = load ptr, ptr %data.addr, align 8, !tbaa !7 %4 = load i8, ptr %3, align 1, !tbaa !9 %conv = sext i8 %4 to i32 @@ -96,7 +96,7 @@ cond.end: ; preds = %cond.false, %cond.t store i64 %conv2, ptr %d, align 8, !tbaa !10 %5 = load i32, ptr %i, 
align 4, !tbaa !3 call void @foo(ptr @.str, i32 %5, ptr %d) - call void @llvm.lifetime.end.p0(i64 8, ptr %d) #3 + call void @llvm.lifetime.end.p0(ptr %d) #3 br label %for.inc for.inc: ; preds = %cond.end @@ -113,12 +113,12 @@ if.end: ; preds = %for.end, %entry } ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare dso_local void @foo(ptr, i32, ptr) #2 ; Function Attrs: argmemonly nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 +declare void @llvm.lifetime.end.p0(ptr nocapture) #1 attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" } attributes #1 = { argmemonly nofree nosync nounwind willreturn } diff --git a/llvm/test/CodeGen/BPF/vla.ll b/llvm/test/CodeGen/BPF/vla.ll index 9a22769..708b41e 100644 --- a/llvm/test/CodeGen/BPF/vla.ll +++ b/llvm/test/CodeGen/BPF/vla.ll @@ -33,17 +33,17 @@ define dso_local i32 @test1() { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) ; CHECK-NEXT: store i32 8, ptr [[A]], align 4 ; CHECK-NEXT: [[VLA:%.*]] = alloca i8, i64 68, align 1 ; CHECK-NEXT: call void @foo(ptr [[VLA]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret i32 0 ; entry: %a = alloca i32, align 4 %saved_stack = alloca ptr, align 8 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) store i32 8, ptr %a, align 4 %0 = call ptr @llvm.stacksave() store ptr %0, ptr %saved_stack, align 8 @@ -51,11 +51,11 @@ entry: call void @foo(ptr %vla) %1 = load ptr, ptr %saved_stack, align 8 call void @llvm.stackrestore(ptr %1) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret i32 0 } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare ptr @llvm.stacksave() @@ -63,7 +63,7 @@ declare dso_local void @foo(ptr) declare void @llvm.stackrestore(ptr) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define dso_local i32 @test2(i32 %b) { ; CHECK-LABEL: @test2( @@ -73,7 +73,7 @@ define dso_local i32 @test2(i32 %b) { ; CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8 ; CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 ; CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]]) ; CHECK-NEXT: store i32 8, ptr [[A]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 8, [[TMP1]] @@ -81,7 +81,7 @@ define dso_local i32 @test2(i32 %b) { ; CHECK-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP2]], align 1 ; CHECK-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 ; CHECK-NEXT: call void @foo(ptr [[VLA]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]]) ; CHECK-NEXT: ret i32 0 ; entry: @@ -90,7 +90,7 @@ entry: %saved_stack = alloca ptr, align 8 %__vla_expr0 = alloca i64, align 8 store 
i32 %b, ptr %b.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) store i32 8, ptr %a, align 4 %0 = load i32, ptr %b.addr, align 4 %add = add nsw i32 8, %0 @@ -102,6 +102,6 @@ entry: call void @foo(ptr %vla) %3 = load ptr, ptr %saved_stack, align 8 call void @llvm.stackrestore(ptr %3) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret i32 0 } diff --git a/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll b/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll new file mode 100644 index 0000000..25f81dd --- /dev/null +++ b/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll @@ -0,0 +1,35 @@ +; Use llc for this test so that we don't abort after the first error. +; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s + +; Check that there is no overlap with unbounded array in different space + + ; Buffer<double> A[2] : register(t2, space4); + ; Buffer<double> B : register(t20, space5); // does not overlap + ; Buffer<double> C[] : register(t2, space4); // overlaps with A + +; CHECK: error: resource A at register 2 overlaps with resource C at register 2 in space 4 +; CHECK-NOT: error: resource C at register 2 overlaps with resource B at register 20 in space 5 + +target triple = "dxil-pc-shadermodel6.3-library" + +@A.str = private unnamed_addr constant [2 x i8] c"A\00", align 1 +@B.str = private unnamed_addr constant [2 x i8] c"B\00", align 1 +@C.str = private unnamed_addr constant [2 x i8] c"C\00", align 1 + +define void @test_not_overlapping_in_different_spaces() { +entry: + + ; Buffer<double> A[2] : register(t2, space4); + %h0 = call target("dx.TypedBuffer", double, 0, 0, 0) + @llvm.dx.resource.handlefrombinding(i32 4, i32 2, i32 2, i32 10, i1 false, ptr @A.str) + + ; Buffer<double> B : register(t20, space5); + %h1 = call target("dx.TypedBuffer", i64, 0, 0, 0) + @llvm.dx.resource.handlefrombinding(i32 5, i32 20, i32 1, i32 0, i1 false, ptr @B.str) + + ; Buffer<double> C[] : register(t2, space4); + %h2 = call target("dx.TypedBuffer", double, 0, 0, 0) + @llvm.dx.resource.handlefrombinding(i32 4, i32 2, i32 -1, i32 10, i1 false, ptr @C.str) + + ret void +} diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll index 736c86e..5cf4fe8 100644 --- a/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll +++ b/llvm/test/CodeGen/DirectX/ShaderFlags/lifetimes-noint64op.ll @@ -15,16 +15,16 @@ target triple = "dxil-pc-shadermodel6.7-library" define void @lifetimes() #0 { %a = alloca [4 x i32], align 8 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %a) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %a) + call void @llvm.lifetime.start.p0(ptr nonnull %a) + call void @llvm.lifetime.end.p0(ptr nonnull %a) ret void } ; Function Attrs: nounwind memory(argmem: readwrite) -declare void @llvm.lifetime.start.p0(i64, ptr) #1 +declare void @llvm.lifetime.start.p0(ptr) #1 ; Function Attrs: nounwind memory(argmem: readwrite) -declare void @llvm.lifetime.end.p0(i64, ptr) #1 +declare void @llvm.lifetime.end.p0(ptr) #1 attributes #0 = { convergent norecurse nounwind "hlsl.export"} attributes #1 = { nounwind memory(argmem: readwrite) } diff --git a/llvm/test/CodeGen/DirectX/imad.ll b/llvm/test/CodeGen/DirectX/imad.ll index 5d9463d..2e612f0 100644 --- a/llvm/test/CodeGen/DirectX/imad.ll +++ b/llvm/test/CodeGen/DirectX/imad.ll @@ -1,17 +1,13 @@ -; RUN: opt -S -dxil-op-lower < %s | FileCheck %s +; RUN: opt -S 
-scalarizer -dxil-op-lower < %s | FileCheck %s ; Make sure dxil operation function calls for round are generated for float and half. -; CHECK:call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]] -; CHECK:call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] -; CHECK:call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] - -; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}} target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64" target triple = "dxil-pc-shadermodel6.7-library" ; Function Attrs: noinline nounwind optnone define noundef i16 @imad_short(i16 noundef %p0, i16 noundef %p1, i16 noundef %p2) #0 { entry: + ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]] %p2.addr = alloca i16, align 2 %p1.addr = alloca i16, align 2 %p0.addr = alloca i16, align 2 @@ -31,6 +27,7 @@ declare i16 @llvm.dx.imad.i16(i16, i16, i16) #1 ; Function Attrs: noinline nounwind optnone define noundef i32 @imad_int(i32 noundef %p0, i32 noundef %p1, i32 noundef %p2) #0 { entry: + ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] %p2.addr = alloca i32, align 4 %p1.addr = alloca i32, align 4 %p0.addr = alloca i32, align 4 @@ -50,6 +47,7 @@ declare i32 @llvm.dx.imad.i32(i32, i32, i32) #1 ; Function Attrs: noinline nounwind optnone define noundef i64 @imad_int64(i64 noundef %p0, i64 noundef %p1, i64 noundef %p2) #0 { entry: + ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] %p2.addr = alloca i64, align 8 %p1.addr = alloca i64, align 8 %p0.addr = alloca i64, align 8 @@ -65,3 +63,95 @@ entry: ; Function Attrs: nocallback nofree nosync nounwind willreturn declare i64 @llvm.dx.imad.i64(i64, i64, i64) #1 + +; Function Attrs: noinline nounwind optnone +define noundef <4 x i16> @imad_int16_t4(<4 x i16> noundef %p0, <4 x i16> noundef %p1, <4 x i16> noundef %p2) #0 { +entry: + ; CHECK: extractelement <4 x i16> %p0, i64 0 + ; CHECK: extractelement <4 x i16> %p1, i64 0 + ; CHECK: extractelement <4 x i16> %p2, i64 0 + ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i16> %p0, i64 1 + ; CHECK: extractelement <4 x i16> %p1, i64 1 + ; CHECK: extractelement <4 x i16> %p2, i64 1 + ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i16> %p0, i64 2 + ; CHECK: extractelement <4 x i16> %p1, i64 2 + ; CHECK: extractelement <4 x i16> %p2, i64 2 + ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i16> %p0, i64 3 + ; CHECK: extractelement <4 x i16> %p1, i64 3 + ; CHECK: extractelement <4 x i16> %p2, i64 3 + ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]] + ; CHECK: insertelement <4 x i16> poison, i16 %{{.*}}, i64 0 + ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 1 + ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 2 + ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 3 + %dx.imad = call <4 x i16> @llvm.dx.imad.v4i16(<4 x i16> %p0, <4 x i16> %p1, <4 x i16> %p2) + ret <4 x i16> %dx.imad +} + +; Function Attrs: nocallback nofree nosync nounwind willreturn +declare <4 x i16> @llvm.dx.imad.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) #1 + +; Function Attrs: 
noinline nounwind optnone +define noundef <4 x i32> @imad_int4(<4 x i32> noundef %p0, <4 x i32> noundef %p1, <4 x i32> noundef %p2) #0 { +entry: + ; CHECK: extractelement <4 x i32> %p0, i64 0 + ; CHECK: extractelement <4 x i32> %p1, i64 0 + ; CHECK: extractelement <4 x i32> %p2, i64 0 + ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i32> %p0, i64 1 + ; CHECK: extractelement <4 x i32> %p1, i64 1 + ; CHECK: extractelement <4 x i32> %p2, i64 1 + ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i32> %p0, i64 2 + ; CHECK: extractelement <4 x i32> %p1, i64 2 + ; CHECK: extractelement <4 x i32> %p2, i64 2 + ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i32> %p0, i64 3 + ; CHECK: extractelement <4 x i32> %p1, i64 3 + ; CHECK: extractelement <4 x i32> %p2, i64 3 + ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] + ; CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i64 0 + ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 1 + ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 2 + ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 3 + %dx.imad = call <4 x i32> @llvm.dx.imad.v4i32(<4 x i32> %p0, <4 x i32> %p1, <4 x i32> %p2) + ret <4 x i32> %dx.imad +} + +; Function Attrs: nocallback nofree nosync nounwind willreturn +declare <4 x i32> @llvm.dx.imad.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1 + +; Function Attrs: noinline nounwind optnone +define noundef <4 x i64> @imad_int64_t4(<4 x i64> noundef %p0, <4 x i64> noundef %p1, <4 x i64> noundef %p2) #0 { +entry: + ; CHECK: extractelement <4 x i64> %p0, i64 0 + ; CHECK: extractelement <4 x i64> %p1, i64 0 + ; CHECK: extractelement <4 x i64> %p2, i64 0 + ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i64> %p0, i64 1 + ; CHECK: extractelement <4 x i64> %p1, i64 1 + ; CHECK: extractelement <4 x i64> %p2, i64 1 + ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i64> %p0, i64 2 + ; CHECK: extractelement <4 x i64> %p1, i64 2 + ; CHECK: extractelement <4 x i64> %p2, i64 2 + ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i64> %p0, i64 3 + ; CHECK: extractelement <4 x i64> %p1, i64 3 + ; CHECK: extractelement <4 x i64> %p2, i64 3 + ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] + ; CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i64 0 + ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 1 + ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 2 + ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 3 + %dx.imad = call <4 x i64> @llvm.dx.imad.v4i64(<4 x i64> %p0, <4 x i64> %p1, <4 x i64> %p2) + ret <4 x i64> %dx.imad +} + +; Function Attrs: nocallback nofree nosync nounwind willreturn +declare <4 x i64> @llvm.dx.imad.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) #1 + +; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}} diff --git a/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll b/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll new file mode 100644 index 0000000..7c0813b --- /dev/null +++ 
b/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll @@ -0,0 +1,33 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -dxil-forward-handle-accesses %s | FileCheck %s + +%"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", i32, 1, 0) } +@global = internal unnamed_addr global %"class.hlsl::RWStructuredBuffer" poison, align 4 +@name = private unnamed_addr constant [5 x i8] c"dest\00", align 1 + + +; NOTE: intent of this test is to confirm load target("dx.RawBuffer", i32, 1, 0) +; is replaced with call @llvm.dx.resource.getpointer +define void @CSMain() local_unnamed_addr { +; CHECK-LABEL: define void @CSMain() local_unnamed_addr { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[AGG_TMP_I1_SROA_0:%.*]] = alloca target("dx.RawBuffer", i32, 1, 0), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = tail call target("dx.RawBuffer", i32, 1, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_1_0t(i32 0, i32 3, i32 1, i32 0, i1 false, ptr nonnull @name) +; CHECK-NEXT: store target("dx.RawBuffer", i32, 1, 0) [[TMP0]], ptr @global, align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @global, align 4 +; CHECK-NEXT: store i32 [[TMP2]], ptr [[AGG_TMP_I1_SROA_0]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_i32_1_0t(target("dx.RawBuffer", i32, 1, 0) [[TMP0]], i32 0) +; CHECK-NEXT: store i32 0, ptr [[TMP3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %alloca = alloca target("dx.RawBuffer", i32, 1, 0), align 8 + %handle = tail call target("dx.RawBuffer", i32, 1, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_1_0t(i32 0, i32 3, i32 1, i32 0, i1 false, ptr nonnull @name) + store target("dx.RawBuffer", i32, 1, 0) %handle , ptr @global, align 4 + %val = load i32, ptr @global, align 4 + store i32 %val , ptr %alloca, align 8 + %indirect = load target("dx.RawBuffer", i32, 1, 0), ptr %alloca, align 8 + %buff = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_i32_1_0t(target("dx.RawBuffer", i32, 1, 0) %indirect, i32 0) + store i32 0, ptr %buff, align 4 + ret void +} diff --git a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll index e485fa2..b1eea30 100644 --- a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll +++ b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.5.ll @@ -11,9 +11,9 @@ define void @test_legal_lifetime() { %accum.i.flat = alloca [1 x i32], align 4 %gep = getelementptr i32, ptr %accum.i.flat, i32 0 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat) + call void @llvm.lifetime.start.p0(ptr nonnull %accum.i.flat) store i32 0, ptr %gep, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat) + call void @llvm.lifetime.end.p0(ptr nonnull %accum.i.flat) ret void } diff --git a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll index 77133eb..256fcc0 100644 --- a/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll +++ b/llvm/test/CodeGen/DirectX/legalize-lifetimes-valver-1.6.ll @@ -13,12 +13,12 @@ ; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [1 x i32], align 4 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[ACCUM_I_FLAT]], i32 0 ; CHECK-SM63-NEXT: store [1 x i32] undef, ptr [[ACCUM_I_FLAT]], align 4 -; CHECK-SM66-NEXT: call void @llvm.lifetime.start.p0(i64 4, 
ptr nonnull [[ACCUM_I_FLAT]]) +; CHECK-SM66-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-EMBED-NOT: bitcast ; CHECK-EMBED-NOT: lifetime ; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 ; CHECK-SM63-NEXT: store [1 x i32] undef, ptr [[ACCUM_I_FLAT]], align 4 -; CHECK-SM66-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]]) +; CHECK-SM66-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-EMBED-NOT: bitcast ; CHECK-EMBED-NOT: lifetime ; CHECK-NEXT: ret void @@ -26,9 +26,9 @@ define void @test_legal_lifetime() { %accum.i.flat = alloca [1 x i32], align 4 %gep = getelementptr i32, ptr %accum.i.flat, i32 0 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat) + call void @llvm.lifetime.start.p0(ptr nonnull %accum.i.flat) store i32 0, ptr %gep, align 4 - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat) + call void @llvm.lifetime.end.p0(ptr nonnull %accum.i.flat) ret void } diff --git a/llvm/test/CodeGen/DirectX/legalize-memset.ll b/llvm/test/CodeGen/DirectX/legalize-memset.ll index a73e737..ad45ac6 100644 --- a/llvm/test/CodeGen/DirectX/legalize-memset.ll +++ b/llvm/test/CodeGen/DirectX/legalize-memset.ll @@ -5,18 +5,14 @@ define void @replace_float_memset_test() #0 { ; CHECK-LABEL: define void @replace_float_memset_test( ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [2 x float], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x float], ptr [[ACCUM_I_FLAT]], i32 0, i32 0 ; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP]], align 4 ; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x float], ptr [[ACCUM_I_FLAT]], i32 0, i32 1 ; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP1]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: ret void ; %accum.i.flat = alloca [2 x float], align 4 - call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %accum.i.flat) call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 0, i32 8, i1 false) - call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %accum.i.flat) ret void } @@ -24,18 +20,14 @@ define void @replace_half_memset_test() #0 { ; CHECK-LABEL: define void @replace_half_memset_test( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [2 x half], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x half], ptr [[ACCUM_I_FLAT]], i32 0, i32 0 ; CHECK-NEXT: store half 0xH0000, ptr [[GEP]], align 2 ; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x half], ptr [[ACCUM_I_FLAT]], i32 0, i32 1 ; CHECK-NEXT: store half 0xH0000, ptr [[GEP1]], align 2 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: ret void ; %accum.i.flat = alloca [2 x half], align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat) call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 0, i32 4, i1 false) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat) ret void } @@ -43,18 +35,14 @@ define void @replace_double_memset_test() #0 { ; CHECK-LABEL: define void @replace_double_memset_test( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [2 x double], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[ACCUM_I_FLAT]]) ; 
CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x double], ptr [[ACCUM_I_FLAT]], i32 0, i32 0 ; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP]], align 8 ; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x double], ptr [[ACCUM_I_FLAT]], i32 0, i32 1 ; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP1]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: ret void ; %accum.i.flat = alloca [2 x double], align 4 - call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %accum.i.flat) call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 0, i32 16, i1 false) - call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %accum.i.flat) ret void } @@ -62,18 +50,14 @@ define void @replace_int16_memset_test() #0 { ; CHECK-LABEL: define void @replace_int16_memset_test( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[CACHE_I:%.*]] = alloca [2 x i16], align 2 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[CACHE_I]]) ; CHECK-NEXT: [[GEP:%.*]] = getelementptr [2 x i16], ptr [[CACHE_I]], i32 0, i32 0 ; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2 ; CHECK-NEXT: [[GEP1:%.*]] = getelementptr [2 x i16], ptr [[CACHE_I]], i32 0, i32 1 ; CHECK-NEXT: store i16 0, ptr [[GEP1]], align 2 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[CACHE_I]]) ; CHECK-NEXT: ret void ; %cache.i = alloca [2 x i16], align 2 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cache.i) call void @llvm.memset.p0.i32(ptr nonnull align 2 dereferenceable(4) %cache.i, i8 0, i32 4, i1 false) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cache.i) ret void } @@ -81,16 +65,12 @@ define void @replace_int_memset_test() #0 { ; CHECK-LABEL: define void @replace_int_memset_test( ; CHECK-SAME: ) #[[ATTR0]] { ; CHECK-NEXT: [[ACCUM_I_FLAT:%.*]] = alloca [1 x i32], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: [[GEP:%.*]] = getelementptr [1 x i32], ptr [[ACCUM_I_FLAT]], i32 0, i32 0 ; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: ret void ; %accum.i.flat = alloca [1 x i32], align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat) call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 0, i32 4, i1 false) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat) ret void } @@ -101,25 +81,19 @@ define void @replace_int_memset_to_var_test() #0 { ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 ; CHECK-NEXT: store i32 1, ptr [[I]], align 4 ; CHECK-NEXT: [[I8_LOAD:%.*]] = load i32, ptr [[I]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: [[GEP:%.*]] = getelementptr [1 x i32], ptr [[ACCUM_I_FLAT]], i32 0, i32 0 ; CHECK-NEXT: store i32 [[I8_LOAD]], ptr [[GEP]], align 4 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[ACCUM_I_FLAT]]) ; CHECK-NEXT: ret void ; %accum.i.flat = alloca [1 x i32], align 4 %i = alloca i8, align 4 store i8 1, ptr %i %i8.load = load i8, ptr %i - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %accum.i.flat) call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(8) %accum.i.flat, i8 %i8.load, i32 4, i1 false) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %accum.i.flat) ret void } attributes #0 = {"hlsl.export"} -declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) -declare void 
@llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) declare void @llvm.memset.p0.i32(ptr writeonly captures(none), i8, i32, i1 immarg) diff --git a/llvm/test/CodeGen/DirectX/umad.ll b/llvm/test/CodeGen/DirectX/umad.ll index 104d238..76516a2 100644 --- a/llvm/test/CodeGen/DirectX/umad.ll +++ b/llvm/test/CodeGen/DirectX/umad.ll @@ -1,17 +1,13 @@ -; RUN: opt -S -dxil-op-lower < %s | FileCheck %s +; RUN: opt -S -scalarizer -dxil-op-lower < %s | FileCheck %s ; Make sure dxil operation function calls for round are generated for float and half. -; CHECK:call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]] -; CHECK:call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] -; CHECK:call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] - -; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}} target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64" target triple = "dxil-pc-shadermodel6.7-library" ; Function Attrs: noinline nounwind optnone define noundef i16 @umad_ushort(i16 noundef %p0, i16 noundef %p1, i16 noundef %p2) #0 { entry: + ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]] %p2.addr = alloca i16, align 2 %p1.addr = alloca i16, align 2 %p0.addr = alloca i16, align 2 @@ -31,6 +27,7 @@ declare i16 @llvm.dx.umad.i16(i16, i16, i16) #1 ; Function Attrs: noinline nounwind optnone define noundef i32 @umad_uint(i32 noundef %p0, i32 noundef %p1, i32 noundef %p2) #0 { entry: + ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] %p2.addr = alloca i32, align 4 %p1.addr = alloca i32, align 4 %p0.addr = alloca i32, align 4 @@ -50,6 +47,7 @@ declare i32 @llvm.dx.umad.i32(i32, i32, i32) #1 ; Function Attrs: noinline nounwind optnone define noundef i64 @umad_uint64(i64 noundef %p0, i64 noundef %p1, i64 noundef %p2) #0 { entry: + ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] %p2.addr = alloca i64, align 8 %p1.addr = alloca i64, align 8 %p0.addr = alloca i64, align 8 @@ -65,3 +63,95 @@ entry: ; Function Attrs: nocallback nofree nosync nounwind willreturn declare i64 @llvm.dx.umad.i64(i64, i64, i64) #1 + +; Function Attrs: noinline nounwind optnone +define noundef <4 x i16> @umad_uint16_t4(<4 x i16> noundef %p0, <4 x i16> noundef %p1, <4 x i16> noundef %p2) #0 { +entry: + ; CHECK: extractelement <4 x i16> %p0, i64 0 + ; CHECK: extractelement <4 x i16> %p1, i64 0 + ; CHECK: extractelement <4 x i16> %p2, i64 0 + ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i16> %p0, i64 1 + ; CHECK: extractelement <4 x i16> %p1, i64 1 + ; CHECK: extractelement <4 x i16> %p2, i64 1 + ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i16> %p0, i64 2 + ; CHECK: extractelement <4 x i16> %p1, i64 2 + ; CHECK: extractelement <4 x i16> %p2, i64 2 + ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i16> %p0, i64 3 + ; CHECK: extractelement <4 x i16> %p1, i64 3 + ; CHECK: extractelement <4 x i16> %p2, i64 3 + ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]] + ; CHECK: insertelement <4 x i16> poison, i16 %{{.*}}, i64 0 + ; CHECK: insertelement <4 x i16> %{{.*}}, i16 
%{{.*}}, i64 1 + ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 2 + ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 3 + %dx.umad = call <4 x i16> @llvm.dx.umad.v4i16(<4 x i16> %p0, <4 x i16> %p1, <4 x i16> %p2) + ret <4 x i16> %dx.umad +} + +; Function Attrs: nocallback nofree nosync nounwind willreturn +declare <4 x i16> @llvm.dx.umad.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) #1 + +; Function Attrs: noinline nounwind optnone +define noundef <4 x i32> @umad_uint4(<4 x i32> noundef %p0, <4 x i32> noundef %p1, <4 x i32> noundef %p2) #0 { +entry: + ; CHECK: extractelement <4 x i32> %p0, i64 0 + ; CHECK: extractelement <4 x i32> %p1, i64 0 + ; CHECK: extractelement <4 x i32> %p2, i64 0 + ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i32> %p0, i64 1 + ; CHECK: extractelement <4 x i32> %p1, i64 1 + ; CHECK: extractelement <4 x i32> %p2, i64 1 + ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i32> %p0, i64 2 + ; CHECK: extractelement <4 x i32> %p1, i64 2 + ; CHECK: extractelement <4 x i32> %p2, i64 2 + ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i32> %p0, i64 3 + ; CHECK: extractelement <4 x i32> %p1, i64 3 + ; CHECK: extractelement <4 x i32> %p2, i64 3 + ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]] + ; CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i64 0 + ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 1 + ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 2 + ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 3 + %dx.umad = call <4 x i32> @llvm.dx.umad.v4i32(<4 x i32> %p0, <4 x i32> %p1, <4 x i32> %p2) + ret <4 x i32> %dx.umad +} + +; Function Attrs: nocallback nofree nosync nounwind willreturn +declare <4 x i32> @llvm.dx.umad.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1 + +; Function Attrs: noinline nounwind optnone +define noundef <4 x i64> @umad_uint64_t4(<4 x i64> noundef %p0, <4 x i64> noundef %p1, <4 x i64> noundef %p2) #0 { +entry: + ; CHECK: extractelement <4 x i64> %p0, i64 0 + ; CHECK: extractelement <4 x i64> %p1, i64 0 + ; CHECK: extractelement <4 x i64> %p2, i64 0 + ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i64> %p0, i64 1 + ; CHECK: extractelement <4 x i64> %p1, i64 1 + ; CHECK: extractelement <4 x i64> %p2, i64 1 + ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i64> %p0, i64 2 + ; CHECK: extractelement <4 x i64> %p1, i64 2 + ; CHECK: extractelement <4 x i64> %p2, i64 2 + ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] + ; CHECK: extractelement <4 x i64> %p0, i64 3 + ; CHECK: extractelement <4 x i64> %p1, i64 3 + ; CHECK: extractelement <4 x i64> %p2, i64 3 + ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]] + ; CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i64 0 + ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 1 + ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 2 + ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 3 + %dx.umad = call <4 x i64> @llvm.dx.umad.v4i64(<4 x i64> %p0, <4 x i64> %p1, <4 x i64> %p2) + ret <4 x i64> %dx.umad +} + +; Function 
Attrs: nocallback nofree nosync nounwind willreturn +declare <4 x i64> @llvm.dx.umad.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) #1 + +; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}} diff --git a/llvm/test/CodeGen/Generic/half.ll b/llvm/test/CodeGen/Generic/half.ll new file mode 100644 index 0000000..f4ea5b5 --- /dev/null +++ b/llvm/test/CodeGen/Generic/half.ll @@ -0,0 +1,87 @@ +; Simple cross-platform smoke checks for basic f16 operations. +; +; There shouldn't be any architectures that crash when trying to use `half`; +; check that here. Additionally do a small handful of smoke tests that work +; well cross-platform. + +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; FIXME(#94434) unsupported on arm64ec +; RUN: %if aarch64-registered-target %{ ! llc %s -o - -mtriple=arm64ec-pc-windows-msvc -filetype=null %} +; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if bpf-registered-target %{ llc %s -o - -mtriple=bpfel | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 -mcpu=ck860fv -mattr=+hard-float | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if directx-registered-target %{ llc %s -o - -mtriple=dxil-pc-shadermodel6.3-library | FileCheck %s --check-prefixes=NOCRASH %} +; RUN: %if hexagon-registered-target %{ llc %s -o - -mtriple=hexagon-unknown-linux-musl | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64el-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if 
mips-registered-target %{ llc %s -o - -mtriple=mipsel-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if spirv-registered-target %{ llc %s -o - -mtriple=spirv-unknown-unknown | FileCheck %s --check-prefixes=NOCRASH %} +; RUN: %if systemz-registered-target %{ llc %s -o - -mtriple=s390x-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if ve-registered-target %{ llc %s -o - -mtriple=ve-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if webassembly-registered-target %{ llc %s -o - -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %} +; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %} + +; Codegen tests don't work the same for graphics targets. Add a dummy directive +; for filecheck, just make sure we don't crash. +; NOCRASH: {{.*}} + +; All backends need to be able to bitcast without converting to another format, +; so we assert against __extendhfsf2, __truncsfhf2, __gnu_{h2f,f2h}_ieee. This +; doesn't catch issues on platforms with hardware f32<->f16, but those tend to +; work better anyway. +; Regression test for https://github.com/llvm/llvm-project/issues/97981. + +define half @from_bits(i16 %bits) nounwind { +; ALL-LABEL: from_bits: +; CHECK-NOT: __extend +; CHECK-NOT: __trunc +; CHECK-NOT: __gnu +; BAD: __extendhfsf2 + %f = bitcast i16 %bits to half + ret half %f +} + +define i16 @to_bits(half %f) nounwind { +; ALL-LABEL: to_bits: +; CHECK-NOT: __extend +; CHECK-NOT: __trunc +; CHECK-NOT: __gnu +; BAD: __truncsfhf2 + %bits = bitcast half %f to i16 + ret i16 %bits +} + +; Some platforms have had problems freezing. Regression test for +; https://github.com/llvm/llvm-project/issues/117337 and similar issues. 
+ +define half @check_freeze(half %f) nounwind { +; ALL-LABEL: check_freeze: + %t0 = freeze half %f + ret half %t0 +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll index 0e172950..fed0858 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll @@ -17,14 +17,25 @@ define <32 x i8> @shuffle_v32i8(<32 x i8> %a) { ret <32 x i8> %shuffle } +define <32 x i8> @shuffle_v32i8_same_lane(<32 x i8> %a) { +; CHECK-LABEL: shuffle_v32i8_same_lane: +; CHECK: # %bb.0: +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI1_0) +; CHECK-NEXT: xvshuf.h $xr1, $xr0, $xr0 +; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: ret + %shuffle = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> <i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> + ret <32 x i8> %shuffle +} define <16 x i16> @shuffle_v16i16(<16 x i16> %a) { ; CHECK-LABEL: shuffle_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI1_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI1_1) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0) +; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI2_0) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_1) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_1) ; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 ; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 ; CHECK-NEXT: xvshuf.w $xr1, $xr2, $xr0 @@ -34,13 +45,25 @@ define <16 x i16> @shuffle_v16i16(<16 x i16> %a) { ret <16 x i16> %shuffle } +define <16 x i16> @shuffle_v16i16_same_lane(<16 x i16> %a) { +; CHECK-LABEL: shuffle_v16i16_same_lane: +; CHECK: # %bb.0: +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0) +; CHECK-NEXT: xvshuf.h $xr1, $xr0, $xr0 +; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: ret + %shuffle = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> <i32 6, i32 7, i32 0, i32 5, i32 2, i32 3, i32 6, i32 5, i32 8, i32 9, i32 10, i32 13, i32 12, i32 15, i32 13, i32 15> + ret <16 x i16> %shuffle +} + define <8 x i32> @shuffle_v8i32(<8 x i32> %a) { ; CHECK-LABEL: shuffle_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI2_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_1) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0) +; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI4_0) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_1) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI4_1) ; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 ; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 ; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0 @@ -50,13 +73,25 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %a) { ret <8 x i32> %shuffle } +define <8 x i32> @shuffle_v8i32_same_lane(<8 x i32> %a) { +; CHECK-LABEL: shuffle_v8i32_same_lane: +; CHECK: # %bb.0: +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI5_0) +; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0 +; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: ret + %shuffle = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> <i32 2, i32 3, 
i32 0, i32 1, i32 4, i32 5, i32 6, i32 7> + ret <8 x i32> %shuffle +} + define <4 x i64> @shuffle_v4i64(<4 x i64> %a) { ; CHECK-LABEL: shuffle_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI3_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_1) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0) +; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI6_0) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_1) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI6_1) ; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 ; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 ; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0 @@ -66,13 +101,25 @@ define <4 x i64> @shuffle_v4i64(<4 x i64> %a) { ret <4 x i64> %shuffle } +define <4 x i64> @shuffle_v4i64_same_lane(<4 x i64> %a) { +; CHECK-LABEL: shuffle_v4i64_same_lane: +; CHECK: # %bb.0: +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI7_0) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI7_0) +; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0 +; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: ret + %shuffle = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 2, i32 3> + ret <4 x i64> %shuffle +} + define <8 x float> @shuffle_v8f32(<8 x float> %a) { ; CHECK-LABEL: shuffle_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI4_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI4_1) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_0) +; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI8_0) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_1) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI8_1) ; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 ; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 ; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0 @@ -82,13 +129,26 @@ define <8 x float> @shuffle_v8f32(<8 x float> %a) { ret <8 x float> %shuffle } +define <8 x float> @shuffle_v8f32_same_lane(<8 x float> %a) { +; CHECK-LABEL: shuffle_v8f32_same_lane: +; CHECK: # %bb.0: +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI9_0) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI9_0) +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 68 +; CHECK-NEXT: xvshuf.w $xr1, $xr0, $xr0 +; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: ret + %shuffle = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> <i32 3, i32 2, i32 0, i32 2, i32 3, i32 1, i32 2, i32 3> + ret <8 x float> %shuffle +} + define <4 x double> @shuffle_v4f64(<4 x double> %a) { ; CHECK-LABEL: shuffle_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI5_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI5_1) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_0) +; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI10_0) +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_1) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI10_1) ; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 ; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 ; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0 @@ -97,3 +157,16 @@ define <4 x double> @shuffle_v4f64(<4 x double> %a) { %shuffle = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> <i32 3, i32 1, i32 2, i32 0> ret <4 x double> %shuffle } + +define <4 x double> @shuffle_v4f64_same_lane(<4 x double> %a) { +; CHECK-LABEL: shuffle_v4f64_same_lane: +; CHECK: # %bb.0: +; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_0) +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI11_0) +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 78 +; 
CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0 +; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: ret + %shuffle = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> <i32 3, i32 2, i32 0, i32 1> + ret <4 x double> %shuffle +} diff --git a/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll b/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll index 278cf01..929db4c 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/long-branch-reg-all-sgpr-used.ll @@ -17,6 +17,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' @@ -287,6 +289,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll index 890ea44..f054bea 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-after-pei.ll @@ -16,6 +16,8 @@ ; AFTER-PEI-NEXT: waveLimiter: false ; AFTER-PEI-NEXT: hasSpilledSGPRs: true ; AFTER-PEI-NEXT: hasSpilledVGPRs: false +; AFTER-PEI-NEXT: numWaveDispatchSGPRs: 0 +; AFTER-PEI-NEXT: numWaveDispatchVGPRs: 0 ; AFTER-PEI-NEXT: scratchRSrcReg: '$sgpr68_sgpr69_sgpr70_sgpr71' ; AFTER-PEI-NEXT: frameOffsetReg: '$fp_reg' ; AFTER-PEI-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll index f84ef8a..924216e 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg-debug.ll @@ -17,6 +17,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll index cc834d0..39f1ddd 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-long-branch-reg.ll @@ -17,6 +17,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir index 06c580e..0cb9bc0 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir +++ 
b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info-no-ir.mir @@ -17,6 +17,8 @@ # FULL-NEXT: waveLimiter: true # FULL-NEXT: hasSpilledSGPRs: false # FULL-NEXT: hasSpilledVGPRs: false +# FULL-NEXT: numWaveDispatchSGPRs: 0 +# FULL-NEXT: numWaveDispatchVGPRs: 0 # FULL-NEXT: scratchRSrcReg: '$sgpr8_sgpr9_sgpr10_sgpr11' # FULL-NEXT: frameOffsetReg: '$sgpr12' # FULL-NEXT: stackPtrOffsetReg: '$sgpr13' @@ -127,6 +129,8 @@ body: | # FULL-NEXT: waveLimiter: false # FULL-NEXT: hasSpilledSGPRs: false # FULL-NEXT: hasSpilledVGPRs: false +# FULL-NEXT: numWaveDispatchSGPRs: 0 +# FULL-NEXT: numWaveDispatchVGPRs: 0 # FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg' # FULL-NEXT: frameOffsetReg: '$fp_reg' # FULL-NEXT: stackPtrOffsetReg: '$sp_reg' @@ -206,6 +210,8 @@ body: | # FULL-NEXT: waveLimiter: false # FULL-NEXT: hasSpilledSGPRs: false # FULL-NEXT: hasSpilledVGPRs: false +# FULL-NEXT: numWaveDispatchSGPRs: 0 +# FULL-NEXT: numWaveDispatchVGPRs: 0 # FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg' # FULL-NEXT: frameOffsetReg: '$fp_reg' # FULL-NEXT: stackPtrOffsetReg: '$sp_reg' @@ -286,6 +292,8 @@ body: | # FULL-NEXT: waveLimiter: false # FULL-NEXT: hasSpilledSGPRs: false # FULL-NEXT: hasSpilledVGPRs: false +# FULL-NEXT: numWaveDispatchSGPRs: 0 +# FULL-NEXT: numWaveDispatchVGPRs: 0 # FULL-NEXT: scratchRSrcReg: '$private_rsrc_reg' # FULL-NEXT: frameOffsetReg: '$fp_reg' # FULL-NEXT: stackPtrOffsetReg: '$sp_reg' diff --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll index 4271546..ab4383b 100644 --- a/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll +++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-function-info.ll @@ -20,6 +20,8 @@ ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 0 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' @@ -80,6 +82,8 @@ define amdgpu_kernel void @kernel(i32 %arg0, i64 %arg1, <16 x i32> %arg2) { ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 3 +; CHECK-NEXT: numWaveDispatchVGPRs: 1 ; CHECK-NEXT: scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99' ; CHECK-NEXT: frameOffsetReg: '$fp_reg' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' @@ -144,6 +148,8 @@ define amdgpu_ps void @gds_size_shader(i32 %arg0, i32 inreg %arg1) #5 { ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 16 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3' ; CHECK-NEXT: frameOffsetReg: '$sgpr33' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' @@ -200,6 +206,8 @@ define void @function() { ; CHECK-NEXT: waveLimiter: false ; CHECK-NEXT: hasSpilledSGPRs: false ; CHECK-NEXT: hasSpilledVGPRs: false +; CHECK-NEXT: numWaveDispatchSGPRs: 16 +; CHECK-NEXT: numWaveDispatchVGPRs: 0 ; CHECK-NEXT: scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3' ; CHECK-NEXT: frameOffsetReg: '$sgpr33' ; CHECK-NEXT: stackPtrOffsetReg: '$sgpr32' diff --git a/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll b/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll index 4265553..9c564ff 100644 --- a/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll +++ b/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll @@ -44,8 +44,8 @@ declare void @bar(ptr) define void @foo() { 
 %p = alloca i32
-  call void @llvm.lifetime.start(i64 4, ptr %p)
+  call void @llvm.lifetime.start(ptr %p)
   call void @bar(ptr %p)
-  call void @llvm.lifetime.end(i64 4, ptr %p)
+  call void @llvm.lifetime.end(ptr %p)
   ret void
 }
diff --git a/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
new file mode 100644
index 0000000..3efe9be
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
@@ -0,0 +1,80 @@
+; RUN: opt < %s -S -passes=infer-address-spaces | FileCheck %s --check-prefix=INFER
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | FileCheck %s --check-prefix=PTX
+; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | %ptxas-verify -arch=sm_90 %}
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+@constant_tensormap = addrspace(4) global [64 x i8] zeroinitializer, align 64
+
+; Inference from const address space
+define void @test_infer_const_from_cast() {
+; INFER-LABEL: @test_infer_const_from_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; BOTH: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .func test_infer_const_from_cast(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %casted = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %casted)
+ ret void
+}
+
+; Cast from const address space to generic
+define void @test_const_to_generic_cast(ptr addrspace(4) %const_ptr) {
+; INFER-LABEL: @test_const_to_generic_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+; PTX-LABEL: .visible .func test_const_to_generic_cast(
+; PTX: prefetch.const.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(4) %const_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; No inference possible
+define void @test_no_inference_possible(ptr %generic_ptr) {
+; INFER-LABEL: @test_no_inference_possible
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p0(ptr %generic_ptr)
+; PTX-LABEL: .visible .func test_no_inference_possible(
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %generic_ptr)
+ ret void
+}
+
+; Cast from param address space to generic
+define void @test_param_to_generic_cast(ptr addrspace(101) %param_ptr) {
+; INFER-LABEL: @test_param_to_generic_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+; PTX-LABEL: .visible .func test_param_to_generic_cast(
+; PTX: prefetch.param.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(101) %param_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; Multiple casts in sequence
+define void @test_infer_through_multiple_casts() {
+; INFER-LABEL: @test_infer_through_multiple_casts
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .func test_infer_through_multiple_casts(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast1 = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ %cast2 = addrspacecast ptr %cast1 to ptr addrspace(4)
+ %cast3 = addrspacecast ptr addrspace(4) %cast2 to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast3)
+ ret void
+}
+
+declare void @llvm.nvvm.prefetch.tensormap.p0(ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4))
+declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101))
+
+
diff --git a/llvm/test/CodeGen/NVPTX/prefetch.ll b/llvm/test/CodeGen/NVPTX/prefetch.ll index a64e4fe..862e26d 100644 --- a/llvm/test/CodeGen/NVPTX/prefetch.ll +++ b/llvm/test/CodeGen/NVPTX/prefetch.ll @@ -12,6 +12,10 @@ declare void @llvm.nvvm.prefetch.local.L2(ptr addrspace(5) %local_ptr) declare void @llvm.nvvm.prefetch.L1(ptr %ptr)
declare void @llvm.nvvm.prefetch.L2(ptr %ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+
declare void @llvm.nvvm.prefetch.global.L2.evict.normal(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetch.global.L2.evict.last(ptr addrspace(1) %global_ptr)
@@ -78,4 +82,43 @@ define void @prefetchu_l1(ptr %ptr) { ; CHECK-PTX64-NEXT: ret;
tail call void @llvm.nvvm.prefetchu.L1(ptr %ptr)
ret void
+}
+
+define void @prefetch_tensormap(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetch_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+ ret void
+}
+
+define void @prefetch_const_tensormap(ptr addrspace(4) %const_ptr) {
+; CHECK-PTX64-LABEL: prefetch_const_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_const_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.const.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+ ret void
+}
+
+define void @prefetch_param_tensormap(ptr addrspace(101) %param_ptr) {
+; CHECK-PTX64-LABEL: prefetch_param_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_param_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.param.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+ ret void
}
\ No newline at end of file diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll index 92cb51b..94c2637 100644 --- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll +++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll @@ -2,19 +2,18 @@ ; RUN: llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM80 %s -; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ +; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | %ptxas-verify -arch=sm_80 %} -; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \ +; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM100 %s -; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \ +; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | %ptxas-verify -arch=sm_100 %} target triple = "nvptx64-nvidia-cuda" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" -; Check straight line reduction. define half @reduce_fadd_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fadd_half( ; CHECK: { @@ -43,45 +42,22 @@ define half @reduce_fadd_half(<8 x half> %in) { } define half @reduce_fadd_half_reassoc(<8 x half> %in) { -; CHECK-SM80-LABEL: reduce_fadd_half_reassoc( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<6>; -; CHECK-SM80-NEXT: .reg .b32 %r<10>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; -; CHECK-SM80-NEXT: add.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM80-NEXT: add.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM80-NEXT: add.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: add.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: mov.b16 %rs4, 0x0000; -; CHECK-SM80-NEXT: add.rn.f16 %rs5, %rs3, %rs4; -; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs5; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_fadd_half_reassoc( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<6>; -; CHECK-SM100-NEXT: .reg .b32 %r<10>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; -; CHECK-SM100-NEXT: add.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM100-NEXT: add.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM100-NEXT: add.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: add.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: mov.b16 %rs4, 0x0000; -; CHECK-SM100-NEXT: add.rn.f16 %rs5, %rs3, %rs4; -; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs5; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_fadd_half_reassoc( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<6>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; +; CHECK-NEXT: add.rn.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: add.rn.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: add.rn.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 
{%rs1, %rs2}, %r7; +; CHECK-NEXT: add.rn.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: mov.b16 %rs4, 0x0000; +; CHECK-NEXT: add.rn.f16 %rs5, %rs3, %rs4; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs5; +; CHECK-NEXT: ret; %res = call reassoc half @llvm.vector.reduce.fadd(half 0.0, <8 x half> %in) ret half %res } @@ -109,7 +85,6 @@ define half @reduce_fadd_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fadd_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fadd_float( ; CHECK: { @@ -148,15 +123,15 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0]; ; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4; ; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-SM80-NEXT: add.rn.f32 %r5, %r3, %r1; +; CHECK-SM80-NEXT: add.rn.f32 %r5, %r4, %r2; ; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3; ; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-SM80-NEXT: add.rn.f32 %r10, %r8, %r6; -; CHECK-SM80-NEXT: add.rn.f32 %r11, %r4, %r2; -; CHECK-SM80-NEXT: add.rn.f32 %r12, %r9, %r7; -; CHECK-SM80-NEXT: add.rn.f32 %r13, %r12, %r11; -; CHECK-SM80-NEXT: add.rn.f32 %r14, %r10, %r5; -; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r13; +; CHECK-SM80-NEXT: add.rn.f32 %r10, %r9, %r7; +; CHECK-SM80-NEXT: add.rn.f32 %r11, %r10, %r5; +; CHECK-SM80-NEXT: add.rn.f32 %r12, %r3, %r1; +; CHECK-SM80-NEXT: add.rn.f32 %r13, %r8, %r6; +; CHECK-SM80-NEXT: add.rn.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r11; ; CHECK-SM80-NEXT: add.rn.f32 %r16, %r15, 0f00000000; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r16; ; CHECK-SM80-NEXT: ret; @@ -164,7 +139,7 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM100-LABEL: reduce_fadd_float_reassoc( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b32 %r<5>; -; CHECK-SM100-NEXT: .reg .b64 %rd<10>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16]; @@ -172,11 +147,8 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4; ; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3; ; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5; -; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7; -; CHECK-SM100-NEXT: // implicit-def: %r2 -; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2}; -; CHECK-SM100-NEXT: add.rn.f32x2 %rd9, %rd7, %rd8; -; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7; +; CHECK-SM100-NEXT: add.rn.f32 %r3, %r1, %r2; ; CHECK-SM100-NEXT: add.rn.f32 %r4, %r3, 0f00000000; ; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-SM100-NEXT: ret; @@ -229,7 +201,6 @@ define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) { ret float %res } -; Check straight line reduction. 
define half @reduce_fmul_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmul_half( ; CHECK: { @@ -256,41 +227,20 @@ define half @reduce_fmul_half(<8 x half> %in) { } define half @reduce_fmul_half_reassoc(<8 x half> %in) { -; CHECK-SM80-LABEL: reduce_fmul_half_reassoc( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<10>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs3; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_fmul_half_reassoc( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<10>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs3; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_fmul_half_reassoc( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; +; CHECK-NEXT: mul.rn.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: mul.rn.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: mul.rn.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: mul.rn.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; +; CHECK-NEXT: ret; %res = call reassoc half @llvm.vector.reduce.fmul(half 1.0, <8 x half> %in) ret half %res } @@ -321,7 +271,6 @@ define half @reduce_fmul_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. 
define float @reduce_fmul_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fmul_float( ; CHECK: { @@ -359,22 +308,22 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) { ; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0]; ; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4; ; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r3, %r1; +; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r4, %r2; ; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3; ; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r8, %r6; -; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r4, %r2; -; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r9, %r7; -; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r12, %r11; -; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r10, %r5; -; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r13; +; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r9, %r7; +; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r10, %r5; +; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r3, %r1; +; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r8, %r6; +; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r11; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-SM80-NEXT: ret; ; ; CHECK-SM100-LABEL: reduce_fmul_float_reassoc( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b32 %r<4>; -; CHECK-SM100-NEXT: .reg .b64 %rd<10>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_reassoc_param_0+16]; @@ -382,11 +331,8 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) { ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd2, %rd4; ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd1, %rd3; ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5; -; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7; -; CHECK-SM100-NEXT: // implicit-def: %r2 -; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2}; -; CHECK-SM100-NEXT: mul.rn.f32x2 %rd9, %rd7, %rd8; -; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7; +; CHECK-SM100-NEXT: mul.rn.f32 %r3, %r1, %r2; ; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in) @@ -436,7 +382,6 @@ define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) { ret float %res } -; Check straight line reduction. define half @reduce_fmax_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmax_half( ; CHECK: { @@ -501,84 +446,256 @@ define half @reduce_fmax_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. 
-define float @reduce_fmax_float(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmax_float( +define half @reduce_fmax_half_nnan(<8 x half> %in) { +; CHECK-LABEL: reduce_fmax_half_nnan( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_half_nnan_param_0]; +; CHECK-NEXT: max.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: max.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: max.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: max.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-NEXT: ret; - %res = call float @llvm.vector.reduce.fmax(<8 x float> %in) - ret float %res + %res = call nnan half @llvm.vector.reduce.fmax(<8 x half> %in) + ret half %res } -define float @reduce_fmax_float_reassoc(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmax_float_reassoc( +define half @reduce_fmax_half_nnan_nonpow2(<7 x half> %in) { +; CHECK-LABEL: reduce_fmax_half_nnan_nonpow2( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<12>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmax_half_nnan_nonpow2_param_0+8]; +; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1; +; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmax_half_nnan_nonpow2_param_0]; +; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2; +; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmax_half_nnan_nonpow2_param_0+12]; +; CHECK-NEXT: max.f16x2 %r4, %r2, %r1; +; CHECK-NEXT: mov.b16 %rs8, 0xFC00; +; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8}; +; CHECK-NEXT: max.f16x2 %r6, %r3, %r5; +; CHECK-NEXT: max.f16x2 %r7, %r4, %r6; +; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7; +; CHECK-NEXT: max.f16 %rs11, %rs9, %rs10; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs11; ; CHECK-NEXT: ret; + %res = call nnan half @llvm.vector.reduce.fmax(<7 x half> %in) + ret half %res +} + +define float @reduce_fmax_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: 
.reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call float @llvm.vector.reduce.fmax(<8 x float> %in) + ret float %res +} + +define float @reduce_fmax_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; 
CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmax(<8 x float> %in) ret float %res } define float @reduce_fmax_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmax_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: max.f32 %r8, %r3, %r7; -; CHECK-NEXT: max.f32 %r9, %r1, %r5; -; CHECK-NEXT: max.f32 %r10, %r9, %r8; -; CHECK-NEXT: max.f32 %r11, %r2, %r6; -; CHECK-NEXT: max.f32 %r12, %r11, %r4; -; CHECK-NEXT: max.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmax_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmax(<7 x float> %in) ret float %res } -; Check straight line reduction. 
+define float @reduce_fmax_float_nnan(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_nnan( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_nnan( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmax(<8 x float> %in) + ret float %res +} + +define float @reduce_fmax_float_nnan_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_nnan_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_nnan_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmax(<7 x float> %in) + ret float %res +} + define half 
@reduce_fmin_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmin_half( ; CHECK: { @@ -643,84 +760,256 @@ define half @reduce_fmin_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. -define float @reduce_fmin_float(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmin_float( +define half @reduce_fmin_half_nnan(<8 x half> %in) { +; CHECK-LABEL: reduce_fmin_half_nnan( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_half_nnan_param_0]; +; CHECK-NEXT: min.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: min.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: min.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: min.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-NEXT: ret; - %res = call float @llvm.vector.reduce.fmin(<8 x float> %in) - ret float %res + %res = call nnan half @llvm.vector.reduce.fmin(<8 x half> %in) + ret half %res } -define float @reduce_fmin_float_reassoc(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmin_float_reassoc( +define half @reduce_fmin_half_nnan_nonpow2(<7 x half> %in) { +; CHECK-LABEL: reduce_fmin_half_nnan_nonpow2( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<12>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmin_half_nnan_nonpow2_param_0+8]; +; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1; +; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmin_half_nnan_nonpow2_param_0]; +; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2; +; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmin_half_nnan_nonpow2_param_0+12]; +; CHECK-NEXT: min.f16x2 %r4, %r2, %r1; +; CHECK-NEXT: mov.b16 %rs8, 0x7C00; +; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8}; +; CHECK-NEXT: min.f16x2 %r6, %r3, %r5; +; CHECK-NEXT: min.f16x2 %r7, %r4, %r6; +; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7; +; CHECK-NEXT: min.f16 %rs11, %rs9, %rs10; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs11; ; CHECK-NEXT: ret; + %res = call nnan half 
@llvm.vector.reduce.fmin(<7 x half> %in) + ret half %res +} + +define float @reduce_fmin_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call float @llvm.vector.reduce.fmin(<8 x float> %in) + ret float %res +} + +define float @reduce_fmin_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, 
%r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmin(<8 x float> %in) ret float %res } define float @reduce_fmin_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmin_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: min.f32 %r8, %r3, %r7; -; CHECK-NEXT: min.f32 %r9, %r1, %r5; -; CHECK-NEXT: min.f32 %r10, %r9, %r8; -; CHECK-NEXT: min.f32 %r11, %r2, %r6; -; CHECK-NEXT: min.f32 %r12, %r11, %r4; -; CHECK-NEXT: min.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmin_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmin(<7 x float> %in) ret float %res } -; Check straight-line reduction. 
+define float @reduce_fmin_float_nnan(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_nnan( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_nnan( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmin(<8 x float> %in) + ret float %res +} + +define float @reduce_fmin_float_nnan_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_nnan_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_nnan_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmin(<7 x float> %in) + ret float %res +} + define half 
@reduce_fmaximum_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmaximum_half( ; CHECK: { @@ -785,84 +1074,131 @@ define half @reduce_fmaximum_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fmaximum_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call float @llvm.vector.reduce.fmaximum(<8 x float> %in) ret float %res } define float @reduce_fmaximum_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, 
[reduce_fmaximum_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float_reassoc( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmaximum(<8 x float> %in) ret float %res } define float @reduce_fmaximum_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.NaN.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: 
ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: max.NaN.f32 %r8, %r3, %r7; -; CHECK-NEXT: max.NaN.f32 %r9, %r1, %r5; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r8; -; CHECK-NEXT: max.NaN.f32 %r11, %r2, %r6; -; CHECK-NEXT: max.NaN.f32 %r12, %r11, %r4; -; CHECK-NEXT: max.NaN.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.NaN.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmaximum(<7 x float> %in) ret float %res } -; Check straight-line reduction. define half @reduce_fminimum_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fminimum_half( ; CHECK: { @@ -927,79 +1263,127 @@ define half @reduce_fminimum_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fminimum_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg 
.b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call float @llvm.vector.reduce.fminimum(<8 x float> %in) ret float %res } define float @reduce_fminimum_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float_reassoc( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, 
%r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fminimum(<8 x float> %in) ret float %res } define float @reduce_fminimum_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.NaN.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: min.NaN.f32 %r8, %r3, %r7; -; CHECK-NEXT: min.NaN.f32 %r9, %r1, %r5; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r8; -; CHECK-NEXT: min.NaN.f32 %r11, %r2, %r6; -; CHECK-NEXT: min.NaN.f32 %r12, %r11, %r4; -; CHECK-NEXT: min.NaN.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.NaN.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fminimum(<7 x float> %in) ret float %res } @@ -1014,15 +1398,15 @@ define i16 @reduce_add_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: add.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: add.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: add.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: add.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: add.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: add.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: add.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: add.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: add.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: add.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: add.s16 %rs13, %rs8, %rs6; +; 
CHECK-SM80-NEXT: add.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1030,20 +1414,17 @@ define i16 @reduce_add_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_add_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0]; ; CHECK-SM100-NEXT: add.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: add.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: add.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: add.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: add.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.add(<8 x i16> %in) ret i16 %res @@ -1103,13 +1484,13 @@ define i32 @reduce_add_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0]; -; CHECK-NEXT: add.s32 %r9, %r3, %r7; -; CHECK-NEXT: add.s32 %r10, %r1, %r5; -; CHECK-NEXT: add.s32 %r11, %r4, %r8; -; CHECK-NEXT: add.s32 %r12, %r2, %r6; -; CHECK-NEXT: add.s32 %r13, %r12, %r11; -; CHECK-NEXT: add.s32 %r14, %r10, %r9; -; CHECK-NEXT: add.s32 %r15, %r14, %r13; +; CHECK-NEXT: add.s32 %r9, %r4, %r8; +; CHECK-NEXT: add.s32 %r10, %r2, %r6; +; CHECK-NEXT: add.s32 %r11, %r10, %r9; +; CHECK-NEXT: add.s32 %r12, %r3, %r7; +; CHECK-NEXT: add.s32 %r13, %r1, %r5; +; CHECK-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-NEXT: add.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.add(<8 x i32> %in) @@ -1147,15 +1528,15 @@ define i16 @reduce_mul_i16(<8 x i16> %in) { ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i16_param_0]; ; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-NEXT: mul.lo.s16 %rs5, %rs3, %rs1; +; CHECK-NEXT: mul.lo.s16 %rs5, %rs4, %rs2; ; CHECK-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-NEXT: mul.lo.s16 %rs10, %rs8, %rs6; -; CHECK-NEXT: mul.lo.s16 %rs11, %rs4, %rs2; -; CHECK-NEXT: mul.lo.s16 %rs12, %rs9, %rs7; -; CHECK-NEXT: mul.lo.s16 %rs13, %rs12, %rs11; -; CHECK-NEXT: mul.lo.s16 %rs14, %rs10, %rs5; -; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs13; +; CHECK-NEXT: mul.lo.s16 %rs10, %rs9, %rs7; +; CHECK-NEXT: mul.lo.s16 %rs11, %rs10, %rs5; +; CHECK-NEXT: mul.lo.s16 %rs12, %rs3, %rs1; +; CHECK-NEXT: mul.lo.s16 %rs13, %rs8, %rs6; +; CHECK-NEXT: mul.lo.s16 %rs14, %rs13, %rs12; +; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs11; ; CHECK-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; @@ -1194,13 +1575,13 @@ define i32 @reduce_mul_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, 
[reduce_mul_i32_param_0]; -; CHECK-NEXT: mul.lo.s32 %r9, %r3, %r7; -; CHECK-NEXT: mul.lo.s32 %r10, %r1, %r5; -; CHECK-NEXT: mul.lo.s32 %r11, %r4, %r8; -; CHECK-NEXT: mul.lo.s32 %r12, %r2, %r6; -; CHECK-NEXT: mul.lo.s32 %r13, %r12, %r11; -; CHECK-NEXT: mul.lo.s32 %r14, %r10, %r9; -; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r13; +; CHECK-NEXT: mul.lo.s32 %r9, %r4, %r8; +; CHECK-NEXT: mul.lo.s32 %r10, %r2, %r6; +; CHECK-NEXT: mul.lo.s32 %r11, %r10, %r9; +; CHECK-NEXT: mul.lo.s32 %r12, %r3, %r7; +; CHECK-NEXT: mul.lo.s32 %r13, %r1, %r5; +; CHECK-NEXT: mul.lo.s32 %r14, %r13, %r12; +; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.mul(<8 x i32> %in) @@ -1238,15 +1619,15 @@ define i16 @reduce_umax_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: max.u16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: max.u16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: max.u16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: max.u16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: max.u16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: max.u16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: max.u16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: max.u16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: max.u16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: max.u16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: max.u16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: max.u16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1254,20 +1635,17 @@ define i16 @reduce_umax_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_umax_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0]; ; CHECK-SM100-NEXT: max.u16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: max.u16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: max.u16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: max.u16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: max.u16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.umax(<8 x i16> %in) ret i16 %res @@ -1327,13 +1705,13 @@ define i32 @reduce_umax_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0]; -; CHECK-NEXT: max.u32 %r9, %r3, %r7; -; CHECK-NEXT: max.u32 %r10, %r1, %r5; -; CHECK-NEXT: max.u32 %r11, %r4, %r8; -; CHECK-NEXT: max.u32 %r12, %r2, %r6; -; CHECK-NEXT: max.u32 %r13, %r12, %r11; -; CHECK-NEXT: max.u32 %r14, %r10, %r9; -; CHECK-NEXT: max.u32 %r15, %r14, %r13; +; CHECK-NEXT: max.u32 %r9, 
%r4, %r8; +; CHECK-NEXT: max.u32 %r10, %r2, %r6; +; CHECK-NEXT: max.u32 %r11, %r10, %r9; +; CHECK-NEXT: max.u32 %r12, %r3, %r7; +; CHECK-NEXT: max.u32 %r13, %r1, %r5; +; CHECK-NEXT: max.u32 %r14, %r13, %r12; +; CHECK-NEXT: max.u32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.umax(<8 x i32> %in) @@ -1371,15 +1749,15 @@ define i16 @reduce_umin_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: min.u16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: min.u16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: min.u16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: min.u16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: min.u16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: min.u16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: min.u16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: min.u16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: min.u16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: min.u16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: min.u16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: min.u16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1387,20 +1765,17 @@ define i16 @reduce_umin_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_umin_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0]; ; CHECK-SM100-NEXT: min.u16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: min.u16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: min.u16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: min.u16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: min.u16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.umin(<8 x i16> %in) ret i16 %res @@ -1460,13 +1835,13 @@ define i32 @reduce_umin_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0]; -; CHECK-NEXT: min.u32 %r9, %r3, %r7; -; CHECK-NEXT: min.u32 %r10, %r1, %r5; -; CHECK-NEXT: min.u32 %r11, %r4, %r8; -; CHECK-NEXT: min.u32 %r12, %r2, %r6; -; CHECK-NEXT: min.u32 %r13, %r12, %r11; -; CHECK-NEXT: min.u32 %r14, %r10, %r9; -; CHECK-NEXT: min.u32 %r15, %r14, %r13; +; CHECK-NEXT: min.u32 %r9, %r4, %r8; +; CHECK-NEXT: min.u32 %r10, %r2, %r6; +; CHECK-NEXT: min.u32 %r11, %r10, %r9; +; CHECK-NEXT: min.u32 %r12, %r3, %r7; +; CHECK-NEXT: min.u32 %r13, %r1, %r5; +; CHECK-NEXT: min.u32 %r14, %r13, %r12; +; CHECK-NEXT: min.u32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.umin(<8 x i32> %in) 
@@ -1504,15 +1879,15 @@ define i16 @reduce_smax_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: max.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: max.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: max.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: max.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: max.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: max.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: max.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: max.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: max.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: max.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: max.s16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: max.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1520,20 +1895,17 @@ define i16 @reduce_smax_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_smax_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0]; ; CHECK-SM100-NEXT: max.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: max.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: max.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: max.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: max.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.smax(<8 x i16> %in) ret i16 %res @@ -1593,13 +1965,13 @@ define i32 @reduce_smax_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0]; -; CHECK-NEXT: max.s32 %r9, %r3, %r7; -; CHECK-NEXT: max.s32 %r10, %r1, %r5; -; CHECK-NEXT: max.s32 %r11, %r4, %r8; -; CHECK-NEXT: max.s32 %r12, %r2, %r6; -; CHECK-NEXT: max.s32 %r13, %r12, %r11; -; CHECK-NEXT: max.s32 %r14, %r10, %r9; -; CHECK-NEXT: max.s32 %r15, %r14, %r13; +; CHECK-NEXT: max.s32 %r9, %r4, %r8; +; CHECK-NEXT: max.s32 %r10, %r2, %r6; +; CHECK-NEXT: max.s32 %r11, %r10, %r9; +; CHECK-NEXT: max.s32 %r12, %r3, %r7; +; CHECK-NEXT: max.s32 %r13, %r1, %r5; +; CHECK-NEXT: max.s32 %r14, %r13, %r12; +; CHECK-NEXT: max.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.smax(<8 x i32> %in) @@ -1637,15 +2009,15 @@ define i16 @reduce_smin_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: min.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: min.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, 
%rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: min.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: min.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: min.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: min.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: min.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: min.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: min.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: min.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: min.s16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: min.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1653,20 +2025,17 @@ define i16 @reduce_smin_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_smin_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0]; ; CHECK-SM100-NEXT: min.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: min.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: min.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: min.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: min.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.smin(<8 x i16> %in) ret i16 %res @@ -1726,13 +2095,13 @@ define i32 @reduce_smin_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0]; -; CHECK-NEXT: min.s32 %r9, %r3, %r7; -; CHECK-NEXT: min.s32 %r10, %r1, %r5; -; CHECK-NEXT: min.s32 %r11, %r4, %r8; -; CHECK-NEXT: min.s32 %r12, %r2, %r6; -; CHECK-NEXT: min.s32 %r13, %r12, %r11; -; CHECK-NEXT: min.s32 %r14, %r10, %r9; -; CHECK-NEXT: min.s32 %r15, %r14, %r13; +; CHECK-NEXT: min.s32 %r9, %r4, %r8; +; CHECK-NEXT: min.s32 %r10, %r2, %r6; +; CHECK-NEXT: min.s32 %r11, %r10, %r9; +; CHECK-NEXT: min.s32 %r12, %r3, %r7; +; CHECK-NEXT: min.s32 %r13, %r1, %r5; +; CHECK-NEXT: min.s32 %r14, %r13, %r12; +; CHECK-NEXT: min.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.smin(<8 x i32> %in) @@ -1761,43 +2130,21 @@ define i32 @reduce_smin_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_and_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_and_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; -; CHECK-SM80-NEXT: and.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: and.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: and.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: and.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg 
.b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_and_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; -; CHECK-SM100-NEXT: and.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: and.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: and.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: and.b32 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_and_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; +; CHECK-NEXT: and.b32 %r5, %r2, %r4; +; CHECK-NEXT: and.b32 %r6, %r1, %r3; +; CHECK-NEXT: and.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: and.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 @llvm.vector.reduce.and(<8 x i16> %in) ret i16 %res } @@ -1837,13 +2184,13 @@ define i32 @reduce_and_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0]; -; CHECK-NEXT: and.b32 %r9, %r3, %r7; -; CHECK-NEXT: and.b32 %r10, %r1, %r5; -; CHECK-NEXT: and.b32 %r11, %r4, %r8; -; CHECK-NEXT: and.b32 %r12, %r2, %r6; -; CHECK-NEXT: and.b32 %r13, %r12, %r11; -; CHECK-NEXT: and.b32 %r14, %r10, %r9; -; CHECK-NEXT: and.b32 %r15, %r14, %r13; +; CHECK-NEXT: and.b32 %r9, %r4, %r8; +; CHECK-NEXT: and.b32 %r10, %r2, %r6; +; CHECK-NEXT: and.b32 %r11, %r10, %r9; +; CHECK-NEXT: and.b32 %r12, %r3, %r7; +; CHECK-NEXT: and.b32 %r13, %r1, %r5; +; CHECK-NEXT: and.b32 %r14, %r13, %r12; +; CHECK-NEXT: and.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.and(<8 x i32> %in) @@ -1872,43 +2219,21 @@ define i32 @reduce_and_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_or_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_or_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; -; CHECK-SM80-NEXT: or.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: or.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: or.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: or.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_or_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; 
CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; -; CHECK-SM100-NEXT: or.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: or.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: or.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: or.b32 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_or_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; +; CHECK-NEXT: or.b32 %r5, %r2, %r4; +; CHECK-NEXT: or.b32 %r6, %r1, %r3; +; CHECK-NEXT: or.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: or.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 @llvm.vector.reduce.or(<8 x i16> %in) ret i16 %res } @@ -1948,13 +2273,13 @@ define i32 @reduce_or_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0]; -; CHECK-NEXT: or.b32 %r9, %r3, %r7; -; CHECK-NEXT: or.b32 %r10, %r1, %r5; -; CHECK-NEXT: or.b32 %r11, %r4, %r8; -; CHECK-NEXT: or.b32 %r12, %r2, %r6; -; CHECK-NEXT: or.b32 %r13, %r12, %r11; -; CHECK-NEXT: or.b32 %r14, %r10, %r9; -; CHECK-NEXT: or.b32 %r15, %r14, %r13; +; CHECK-NEXT: or.b32 %r9, %r4, %r8; +; CHECK-NEXT: or.b32 %r10, %r2, %r6; +; CHECK-NEXT: or.b32 %r11, %r10, %r9; +; CHECK-NEXT: or.b32 %r12, %r3, %r7; +; CHECK-NEXT: or.b32 %r13, %r1, %r5; +; CHECK-NEXT: or.b32 %r14, %r13, %r12; +; CHECK-NEXT: or.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.or(<8 x i32> %in) @@ -1983,43 +2308,21 @@ define i32 @reduce_or_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_xor_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_xor_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; -; CHECK-SM80-NEXT: xor.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: xor.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: xor.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: xor.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_xor_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; -; CHECK-SM100-NEXT: xor.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: xor.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: xor.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: xor.b32 %r9, %r7, %r8; 
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_xor_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; +; CHECK-NEXT: xor.b32 %r5, %r2, %r4; +; CHECK-NEXT: xor.b32 %r6, %r1, %r3; +; CHECK-NEXT: xor.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: xor.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 @llvm.vector.reduce.xor(<8 x i16> %in) ret i16 %res } @@ -2059,13 +2362,13 @@ define i32 @reduce_xor_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0]; -; CHECK-NEXT: xor.b32 %r9, %r3, %r7; -; CHECK-NEXT: xor.b32 %r10, %r1, %r5; -; CHECK-NEXT: xor.b32 %r11, %r4, %r8; -; CHECK-NEXT: xor.b32 %r12, %r2, %r6; -; CHECK-NEXT: xor.b32 %r13, %r12, %r11; -; CHECK-NEXT: xor.b32 %r14, %r10, %r9; -; CHECK-NEXT: xor.b32 %r15, %r14, %r13; +; CHECK-NEXT: xor.b32 %r9, %r4, %r8; +; CHECK-NEXT: xor.b32 %r10, %r2, %r6; +; CHECK-NEXT: xor.b32 %r11, %r10, %r9; +; CHECK-NEXT: xor.b32 %r12, %r3, %r7; +; CHECK-NEXT: xor.b32 %r13, %r1, %r5; +; CHECK-NEXT: xor.b32 %r14, %r13, %r12; +; CHECK-NEXT: xor.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.xor(<8 x i32> %in) diff --git a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll index 5502980..1d69f8d 100644 --- a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll +++ b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll @@ -119,7 +119,7 @@ define dso_local i32 @foo() { ; CHECK-NEXT: [[CONV:%.*]] = sext i8 1 to i32 ; CHECK-NEXT: [[CONV1:%.*]] = sext i16 1 to i32 ; CHECK-NEXT: [[CONV2:%.*]] = fpext float 1.000000e+00 to double -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; CHECK-NEXT: store i32 [[CONV]], ptr [[TMP0]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 @@ -133,7 +133,7 @@ define dso_local i32 @foo() { ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 6 ; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP5]], align 8 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics1(i32 noundef 1, ptr [[VARARG_BUFFER]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: ret i32 [[CALL]] ; entry: @@ -208,7 +208,7 @@ define dso_local i32 @bar() { ; CHECK-NEXT: [[S1_SROA_2_0_COPYLOAD:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1_SROA_3]], ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false) ; CHECK-NEXT: [[S1_SROA_31_0_COPYLOAD:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8 -; CHECK-NEXT: call 
void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; CHECK-NEXT: store i32 [[S1_SROA_0_0_COPYLOAD]], ptr [[TMP0]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1 @@ -216,7 +216,7 @@ define dso_local i32 @bar() { ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3 ; CHECK-NEXT: store i64 [[S1_SROA_31_0_COPYLOAD]], ptr [[TMP2]], align 8 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics2(i32 noundef 1, ptr [[VARARG_BUFFER]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: ret i32 [[CALL]] ; entry: @@ -274,11 +274,11 @@ define dso_local i32 @baz() { ; CHECK-LABEL: define dso_local i32 @baz() { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[BAZ_VARARG:%.*]], align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAZ_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; CHECK-NEXT: store <4 x i32> splat (i32 1), ptr [[TMP0]], align 16 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics3(i32 noundef 1, ptr [[VARARG_BUFFER]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: ret i32 [[CALL]] ; entry: @@ -333,11 +333,11 @@ define dso_local void @qux() { ; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 8 ; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[QUX_VARARG:%.*]], align 8 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S]], ptr align 8 @__const.qux.s, i64 16, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[QUX_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0 ; CHECK-NEXT: store i64 1, ptr [[TMP0]], align 8 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics4(ptr noundef byval([[STRUCT_S2]]) align 8 [[S]], ptr [[VARARG_BUFFER]]) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]]) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir index 41e2124..2796cdb 100644 --- a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir +++ b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir @@ -1,6 +1,12 @@ # RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -start-after \ # RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \ # RUN: -o - | FileCheck %s +# RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu -start-after \ +# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \ +# RUN: -o - | FileCheck %s +# RUN: llc -mcpu=pwr10 -mtriple=powerpc64le-unknown-linux-gnu -start-after \ +# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \ +# RUN: -o - | FileCheck %s --- | ; ModuleID = 'a.ll' @@ -30,7 +36,7 @@ ; Function 
Attrs: nounwind declare void @llvm.stackprotector(ptr, ptr) #1 - attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind } !llvm.ident = !{!0} diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll index 9ffb4fd..258ddf6 100644 --- a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll +++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll @@ -37,9 +37,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8 ; 32BIT: bb.0.entry: ; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6 ; 32BIT-NEXT: {{ $}} - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6 ; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3 ; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3 ; @@ -47,9 +47,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8 ; 64BIT: bb.0.entry: ; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6 ; 64BIT-NEXT: {{ $}} - ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 ; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3 ; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3 entry: @@ -96,9 +96,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3 ; 32BIT: bb.0.entry: ; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6 ; 32BIT-NEXT: {{ $}} - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4 + ; 
32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6 ; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3 ; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3 ; @@ -106,9 +106,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3 ; 64BIT: bb.0.entry: ; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6 ; 64BIT-NEXT: {{ $}} - ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 ; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3 ; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3 entry: diff --git a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll index 1863eaf..bfc7fbb 100644 --- a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll +++ b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll @@ -1,5 +1,5 @@ -; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s -; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s +; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s +; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s define ptr @nest_receiver(ptr nest %arg) nounwind { ret ptr %arg @@ -9,5 +9,10 @@ define ptr @nest_caller(ptr %arg) nounwind { %result = call ptr @nest_receiver(ptr nest %arg) ret ptr %result } +; CHECK-LABEL: .nest_receiver: +; CHECK: mr 3, 11 +; CHECK: blr -; CHECK: LLVM ERROR: Nest arguments are unimplemented. +; CHECK-LABEL: .nest_caller: +; CHECK: mr 11, 3 +; CHECK: bl .nest_receiver diff --git a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll index b71f6b5..19df220 100644 --- a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll +++ b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll @@ -1,7 +1,7 @@ -; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s -; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s - -; CHECK: LLVM ERROR: INIT_TRAMPOLINE operation is not supported on AIX. 
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | \ +; RUN: FileCheck %s --check-prefix=32BIT +; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 -mattr=-altivec | \ +; RUN: FileCheck %s --check-prefix=64BIT define void @create_trampoline(ptr %buffer, ptr %nval) nounwind { entry: @@ -12,3 +12,17 @@ entry: declare i32 @nested(i32); declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind + +; 32BIT: stw 4, 8(3) +; 32BIT: lwz [[FuncDesc:[0-9]+]], L..C0(2) +; 32BIT-DAG: lwz [[SCRATCH1:[0-9]+]], 0([[FuncDesc]]) +; 32BIT-DAG: lwz [[SCRATCH2:[0-9]+]], 4([[FuncDesc]]) +; 32BIT-DAG: stw [[SCRATCH1]], 0(3) +; 32BIT-DAG: stw [[SCRATCH2]], 4(3) + +; 64BIT: std 4, 16(3) +; 64BIT-DAG: ld [[FuncDesc:[0-9]+]], L..C0(2) +; 64BIT-DAG: ld [[SCRATCH1:[0-9]+]], 0([[FuncDesc]]) +; 64BIT-DAG: ld [[SCRATCH2:[0-9]+]], 8([[FuncDesc]]) +; 64BIT-DAG: std [[SCRATCH1]], 0(3) +; 64BIT-DAG: std [[SCRATCH2]], 8(3) diff --git a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll index 59173e2..d8e66d6 100644 --- a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll +++ b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \ ; RUN: < %s | FileCheck %s --check-prefix=POWERPC_64LE @@ -7,240 +8,90 @@ ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc-ibm-aix \ ; RUN: < %s | FileCheck %s --check-prefix=POWERPC_32 -define i32 @test_Greater_than(ptr %colauths, i32 signext %ncols) { -; This testcase is manually reduced to isolate the critical code blocks. -; It is designed to check for vector comparison specifically for zero vectors. -; In the vector.body section, we are expecting a comparison instruction (vcmpequh), -; merge instructions (vmrghh and vmrglh) which use exactly 2 vectors. -; The output of the merge instruction is being used by xxland and finally -; accumulated by vadduwm instruction. - +define i32 @test_Greater_than(ptr %colauths) { +; This testcase is for the special case of zero-vector comparisons. +; Currently the generated code does a comparison (vcmpequh) and then a negation (xxlnor). +; This pattern is expected to be optimized in a future patch. 
; POWERPC_64LE-LABEL: test_Greater_than: -; POWERPC_64LE: .LBB0_6: # %vector.body -; POWERPC_64LE-NEXT: # -; POWERPC_64LE-NEXT: lxv [[R1:[0-9]+]], -64(4) -; POWERPC_64LE-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_64LE-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_64LE-NEXT: vmrghh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_64LE-NEXT: vmrglh [[R2]], [[R2]], [[R2]] -; POWERPC_64LE-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_64LE-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_64LE-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_64LE: .LBB0_10: # %vec.epilog.vector.body -; POWERPC_64LE-NEXT: # -; POWERPC_64LE-NEXT: lxv [[R8:[0-9]+]], 0(4) -; POWERPC_64LE-NEXT: addi 4, 4, 16 -; POWERPC_64LE-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]] -; POWERPC_64LE-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_64LE-NEXT: vmrglh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_64LE-NEXT: vmrghh [[R9]], [[R9]], [[R9]] -; POWERPC_64LE-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_64LE-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_64LE-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_64LE-NEXT: vadduwm [[R3]], [[R3]], [[R11]] -; POWERPC_64LE-NEXT: bdnz .LBB0_10 -; POWERPC_64LE: blr +; POWERPC_64LE: # %bb.0: # %entry +; POWERPC_64LE-NEXT: lfd 0, 0(3) +; POWERPC_64LE-NEXT: xxlxor 35, 35, 35 +; POWERPC_64LE-NEXT: li 4, 0 +; POWERPC_64LE-NEXT: li 3, 4 +; POWERPC_64LE-NEXT: xxswapd 34, 0 +; POWERPC_64LE-NEXT: vcmpequh 2, 2, 3 +; POWERPC_64LE-NEXT: xxlnor 34, 34, 34 +; POWERPC_64LE-NEXT: vmrglh 3, 2, 2 +; POWERPC_64LE-NEXT: vextuwrx 4, 4, 2 +; POWERPC_64LE-NEXT: vextuwrx 3, 3, 3 +; POWERPC_64LE-NEXT: clrlwi 4, 4, 31 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 1, 30, 30 +; POWERPC_64LE-NEXT: mfvsrwz 3, 35 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 2, 29, 29 +; POWERPC_64LE-NEXT: li 3, 12 +; POWERPC_64LE-NEXT: vextuwrx 3, 3, 3 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 3, 28, 28 +; POWERPC_64LE-NEXT: stb 4, -1(1) +; POWERPC_64LE-NEXT: lbz 3, -1(1) +; POWERPC_64LE-NEXT: popcntd 3, 3 +; POWERPC_64LE-NEXT: blr ; ; POWERPC_64-LABEL: test_Greater_than: -; POWERPC_64: L..BB0_6: # %vector.body -; POWERPC_64-NEXT: # -; POWERPC_64-NEXT: lxv [[R1:[0-9]+]], -64(4) -; POWERPC_64-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_64-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_64-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_64-NEXT: vmrghh [[R2]], [[R2]], [[R2]] -; POWERPC_64-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_64-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_64-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_64: L..BB0_10: # %vec.epilog.vector.body -; POWERPC_64-NEXT: # -; POWERPC_64-NEXT: lxv [[R8:[0-9]+]], 0(4) -; POWERPC_64-NEXT: addi 4, 4, 16 -; POWERPC_64-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]] -; POWERPC_64-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_64-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_64-NEXT: vmrglh [[R9]], [[R9]], [[R9]] -; POWERPC_64-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_64-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_64-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_64-NEXT: vadduwm [[R3]], [[R3]], [[R11]] -; POWERPC_64-NEXT: bdnz L..BB0_10 -; POWERPC_64: blr +; POWERPC_64: # %bb.0: # %entry +; POWERPC_64-NEXT: lxsd 2, 0(3) +; POWERPC_64-NEXT: xxlxor 35, 35, 35 +; POWERPC_64-NEXT: li 4, 12 +; POWERPC_64-NEXT: li 3, 8 +; POWERPC_64-NEXT: vcmpequh 2, 2, 3 +; POWERPC_64-NEXT: xxlnor 34, 34, 34 +; POWERPC_64-NEXT: vmrghh 2, 2, 2 +; POWERPC_64-NEXT: vextuwlx 4, 4, 2 +; 
POWERPC_64-NEXT: vextuwlx 3, 3, 2 +; POWERPC_64-NEXT: clrlwi 4, 4, 31 +; POWERPC_64-NEXT: rlwimi 4, 3, 1, 30, 30 +; POWERPC_64-NEXT: mfvsrwz 3, 34 +; POWERPC_64-NEXT: rlwimi 4, 3, 2, 29, 29 +; POWERPC_64-NEXT: li 3, 0 +; POWERPC_64-NEXT: vextuwlx 3, 3, 2 +; POWERPC_64-NEXT: rlwimi 4, 3, 3, 28, 28 +; POWERPC_64-NEXT: stb 4, -1(1) +; POWERPC_64-NEXT: lbz 3, -1(1) +; POWERPC_64-NEXT: popcntd 3, 3 +; POWERPC_64-NEXT: blr ; ; POWERPC_32-LABEL: test_Greater_than: -; POWERPC_32: L..BB0_7: # %vector.body -; POWERPC_32-NEXT: # -; POWERPC_32-NEXT: lxv [[R1:[0-9]+]], 0(10) -; POWERPC_32-NEXT: addic [[R13:[0-9]+]], [[R13]], 64 -; POWERPC_32-NEXT: addze [[R14:[0-9]+]], [[R14]] -; POWERPC_32-NEXT: xor [[R15:[0-9]+]], [[R13]], [[R16:[0-9]+]] -; POWERPC_32-NEXT: or. [[R15]], [[R15]], [[R14]] -; POWERPC_32-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_32-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_32-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_32-NEXT: vmrghh [[R2]], [[R2]], [[R2]] -; POWERPC_32-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_32-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_32-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_32: L..BB0_11: # %vec.epilog.vector.body -; POWERPC_32-NEXT: # -; POWERPC_32-NEXT: slwi [[R14]], [[R13]], 1 -; POWERPC_32-NEXT: addic [[R13]], [[R13]], 8 -; POWERPC_32-NEXT: addze [[R17:[0-9]+]], [[R17]] -; POWERPC_32-NEXT: lxvx [[R8:[0-9]+]], [[R18:[0-9]+]], [[R14]] -; POWERPC_32-NEXT: xor [[R14]], [[R13]], [[R16]] -; POWERPC_32-NEXT: or. [[R14]], [[R14]], [[R17]] -; POWERPC_32-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R3]] -; POWERPC_32-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_32-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_32-NEXT: vmrglh [[R9]], [[R9]], [[R9]] -; POWERPC_32-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_32-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_32-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_32-NEXT: vadduwm [[R19:[0-9]+]], [[R19]], [[R11]] -; POWERPC_32-NEXT: bne 0, L..BB0_11 -; POWERPC_32: blr - entry: - %cmp5 = icmp sgt i32 %ncols, 0 - br i1 %cmp5, label %iter.check, label %for.cond.cleanup - -iter.check: ; preds = %entry - %wide.trip.count = zext nneg i32 %ncols to i64 - %min.iters.check = icmp ult i32 %ncols, 8 - br i1 %min.iters.check, label %for.body.preheader, label %vector.main.loop.iter.check - -for.body.preheader: ; preds = %vec.epilog.iter.check, %vec.epilog.middle.block, %iter.check - %indvars.iv.ph = phi i64 [ 0, %iter.check ], [ %n.vec, %vec.epilog.iter.check ], [ %n.vec31, %vec.epilog.middle.block ] - %num_cols_needed.06.ph = phi i32 [ 0, %iter.check ], [ %33, %vec.epilog.iter.check ], [ %40, %vec.epilog.middle.block ] - br label %for.body - -vector.main.loop.iter.check: ; preds = %iter.check - %min.iters.check9 = icmp ult i32 %ncols, 64 - br i1 %min.iters.check9, label %vec.epilog.ph, label %vector.ph - -vector.ph: ; preds = %vector.main.loop.iter.check - %n.vec = and i64 %wide.trip.count, 2147483584 - br label %vector.body - -vector.body: ; preds = %vector.body, %vector.ph - %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] - %vec.phi = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %24, %vector.body ] - %vec.phi10 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %25, %vector.body ] - %vec.phi11 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %26, %vector.body ] - %vec.phi12 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %27, %vector.body ] - %vec.phi13 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %28, %vector.body 
] - %vec.phi14 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %29, %vector.body ] - %vec.phi15 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %30, %vector.body ] - %vec.phi16 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %31, %vector.body ] - %0 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index - %1 = getelementptr inbounds nuw i8, ptr %0, i64 16 - %2 = getelementptr inbounds nuw i8, ptr %0, i64 32 - %3 = getelementptr inbounds nuw i8, ptr %0, i64 48 - %4 = getelementptr inbounds nuw i8, ptr %0, i64 64 - %5 = getelementptr inbounds nuw i8, ptr %0, i64 80 - %6 = getelementptr inbounds nuw i8, ptr %0, i64 96 - %7 = getelementptr inbounds nuw i8, ptr %0, i64 112 - %wide.load = load <8 x i16>, ptr %0, align 2, !tbaa !5 - %wide.load17 = load <8 x i16>, ptr %1, align 2, !tbaa !5 - %wide.load18 = load <8 x i16>, ptr %2, align 2, !tbaa !5 - %wide.load19 = load <8 x i16>, ptr %3, align 2, !tbaa !5 - %wide.load20 = load <8 x i16>, ptr %4, align 2, !tbaa !5 - %wide.load21 = load <8 x i16>, ptr %5, align 2, !tbaa !5 - %wide.load22 = load <8 x i16>, ptr %6, align 2, !tbaa !5 - %wide.load23 = load <8 x i16>, ptr %7, align 2, !tbaa !5 - %8 = icmp ne <8 x i16> %wide.load, zeroinitializer - %9 = icmp ne <8 x i16> %wide.load17, zeroinitializer - %10 = icmp ne <8 x i16> %wide.load18, zeroinitializer - %11 = icmp ne <8 x i16> %wide.load19, zeroinitializer - %12 = icmp ne <8 x i16> %wide.load20, zeroinitializer - %13 = icmp ne <8 x i16> %wide.load21, zeroinitializer - %14 = icmp ne <8 x i16> %wide.load22, zeroinitializer - %15 = icmp ne <8 x i16> %wide.load23, zeroinitializer - %16 = zext <8 x i1> %8 to <8 x i32> - %17 = zext <8 x i1> %9 to <8 x i32> - %18 = zext <8 x i1> %10 to <8 x i32> - %19 = zext <8 x i1> %11 to <8 x i32> - %20 = zext <8 x i1> %12 to <8 x i32> - %21 = zext <8 x i1> %13 to <8 x i32> - %22 = zext <8 x i1> %14 to <8 x i32> - %23 = zext <8 x i1> %15 to <8 x i32> - %24 = add <8 x i32> %vec.phi, %16 - %25 = add <8 x i32> %vec.phi10, %17 - %26 = add <8 x i32> %vec.phi11, %18 - %27 = add <8 x i32> %vec.phi12, %19 - %28 = add <8 x i32> %vec.phi13, %20 - %29 = add <8 x i32> %vec.phi14, %21 - %30 = add <8 x i32> %vec.phi15, %22 - %31 = add <8 x i32> %vec.phi16, %23 - %index.next = add nuw i64 %index, 64 - %32 = icmp eq i64 %index.next, %n.vec - br i1 %32, label %middle.block, label %vector.body, !llvm.loop !9 - -middle.block: ; preds = %vector.body - %bin.rdx = add <8 x i32> %25, %24 - %bin.rdx24 = add <8 x i32> %26, %bin.rdx - %bin.rdx25 = add <8 x i32> %27, %bin.rdx24 - %bin.rdx26 = add <8 x i32> %28, %bin.rdx25 - %bin.rdx27 = add <8 x i32> %29, %bin.rdx26 - %bin.rdx28 = add <8 x i32> %30, %bin.rdx27 - %bin.rdx29 = add <8 x i32> %31, %bin.rdx28 - %33 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %bin.rdx29) - %cmp.n = icmp eq i64 %n.vec, %wide.trip.count - br i1 %cmp.n, label %for.cond.cleanup, label %vec.epilog.iter.check - -vec.epilog.iter.check: ; preds = %middle.block - %n.vec.remaining = and i64 %wide.trip.count, 56 - %min.epilog.iters.check = icmp eq i64 %n.vec.remaining, 0 - br i1 %min.epilog.iters.check, label %for.body.preheader, label %vec.epilog.ph - -vec.epilog.ph: ; preds = %vec.epilog.iter.check, %vector.main.loop.iter.check - %vec.epilog.resume.val = phi i64 [ %n.vec, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ] - %bc.merge.rdx = phi i32 [ %33, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ] - %n.vec31 = and i64 %wide.trip.count, 2147483640 - %34 = insertelement <8 x i32> <i32 poison, i32 0, i32 0, i32 0, i32 0, i32 0, 
i32 0, i32 0>, i32 %bc.merge.rdx, i64 0 - br label %vec.epilog.vector.body - -vec.epilog.vector.body: ; preds = %vec.epilog.vector.body, %vec.epilog.ph - %index32 = phi i64 [ %vec.epilog.resume.val, %vec.epilog.ph ], [ %index.next35, %vec.epilog.vector.body ] - %vec.phi33 = phi <8 x i32> [ %34, %vec.epilog.ph ], [ %38, %vec.epilog.vector.body ] - %35 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index32 - %wide.load34 = load <8 x i16>, ptr %35, align 2, !tbaa !5 - %36 = icmp ne <8 x i16> %wide.load34, zeroinitializer - %37 = zext <8 x i1> %36 to <8 x i32> - %38 = add <8 x i32> %vec.phi33, %37 - %index.next35 = add nuw i64 %index32, 8 - %39 = icmp eq i64 %index.next35, %n.vec31 - br i1 %39, label %vec.epilog.middle.block, label %vec.epilog.vector.body, !llvm.loop !13 - -vec.epilog.middle.block: ; preds = %vec.epilog.vector.body - %40 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %38) - %cmp.n36 = icmp eq i64 %n.vec31, %wide.trip.count - br i1 %cmp.n36, label %for.cond.cleanup, label %for.body.preheader - -for.cond.cleanup: ; preds = %for.body, %middle.block, %vec.epilog.middle.block, %entry - %num_cols_needed.0.lcssa = phi i32 [ 0, %entry ], [ %33, %middle.block ], [ %40, %vec.epilog.middle.block ], [ %spec.select, %for.body ] - ret i32 %num_cols_needed.0.lcssa - -for.body: ; preds = %for.body.preheader, %for.body - %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ] - %num_cols_needed.06 = phi i32 [ %spec.select, %for.body ], [ %num_cols_needed.06.ph, %for.body.preheader ] - %arrayidx = getelementptr inbounds nuw i16, ptr %colauths, i64 %indvars.iv - %41 = load i16, ptr %arrayidx, align 2, !tbaa !5 - %tobool.not = icmp ne i16 %41, 0 - %inc = zext i1 %tobool.not to i32 - %spec.select = add nuw nsw i32 %num_cols_needed.06, %inc - %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 - %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count - br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !14 +; POWERPC_32: # %bb.0: # %entry +; POWERPC_32-NEXT: li 4, 4 +; POWERPC_32-NEXT: lxvwsx 1, 0, 3 +; POWERPC_32-NEXT: xxlxor 35, 35, 35 +; POWERPC_32-NEXT: lxvwsx 0, 3, 4 +; POWERPC_32-NEXT: xxmrghw 34, 1, 0 +; POWERPC_32-NEXT: vcmpequh 2, 2, 3 +; POWERPC_32-NEXT: xxlnor 34, 34, 34 +; POWERPC_32-NEXT: vmrghh 2, 2, 2 +; POWERPC_32-NEXT: stxv 34, -32(1) +; POWERPC_32-NEXT: lwz 3, -20(1) +; POWERPC_32-NEXT: lwz 4, -24(1) +; POWERPC_32-NEXT: clrlwi 3, 3, 31 +; POWERPC_32-NEXT: rlwimi 3, 4, 1, 30, 30 +; POWERPC_32-NEXT: lwz 4, -28(1) +; POWERPC_32-NEXT: rlwimi 3, 4, 2, 29, 29 +; POWERPC_32-NEXT: lwz 4, -32(1) +; POWERPC_32-NEXT: rlwimi 3, 4, 3, 28, 28 +; POWERPC_32-NEXT: popcntw 3, 3 +; POWERPC_32-NEXT: blr +entry: + %0 = load <4 x i16>, ptr %colauths, align 2, !tbaa !5 + %1 = icmp ne <4 x i16> %0, zeroinitializer + %2 = bitcast <4 x i1> %1 to i4 + %3 = tail call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 %2) + %4 = zext nneg i4 %3 to i32 + ret i32 %4 } +declare i4 @llvm.ctpop.i4(i4) #1 + !5 = !{!6, !6, i64 0} !6 = !{!"short", !7, i64 0} !7 = !{!"omnipotent char", !8, i64 0} !8 = !{!"Simple C/C++ TBAA"} -!9 = distinct !{!9, !10, !11, !12} -!10 = !{!"llvm.loop.mustprogress"} -!11 = !{!"llvm.loop.isvectorized", i32 1} -!12 = !{!"llvm.loop.unroll.runtime.disable"} -!13 = distinct !{!13, !10, !11, !12} -!14 = distinct !{!14, !10, !12, !11} diff --git a/llvm/test/CodeGen/PowerPC/memintr32.ll b/llvm/test/CodeGen/PowerPC/memintr32.ll index c07a5af..4f0a996 100644 --- a/llvm/test/CodeGen/PowerPC/memintr32.ll +++ 
b/llvm/test/CodeGen/PowerPC/memintr32.ll @@ -11,7 +11,7 @@ define i32 @memcmp_test(ptr nocapture noundef readonly %ptr1, ptr nocapture noun ; CHECK-AIX-32-P9-NEXT: mflr r0 ; CHECK-AIX-32-P9-NEXT: stwu r1, -64(r1) ; CHECK-AIX-32-P9-NEXT: stw r0, 72(r1) -; CHECK-AIX-32-P9-NEXT: bl .memcmp[PR] +; CHECK-AIX-32-P9-NEXT: bl .___memcmp[PR] ; CHECK-AIX-32-P9-NEXT: nop ; CHECK-AIX-32-P9-NEXT: addi r1, r1, 64 ; CHECK-AIX-32-P9-NEXT: lwz r0, 8(r1) diff --git a/llvm/test/CodeGen/PowerPC/memintr64.ll b/llvm/test/CodeGen/PowerPC/memintr64.ll index b3a6650..0b0e556 100644 --- a/llvm/test/CodeGen/PowerPC/memintr64.ll +++ b/llvm/test/CodeGen/PowerPC/memintr64.ll @@ -39,7 +39,7 @@ define noundef i32 @_Z11memcmp_testPKvS0_m(ptr noundef readonly captures(none) % ; CHECK-AIX-64-P9-NEXT: mflr r0 ; CHECK-AIX-64-P9-NEXT: stdu r1, -112(r1) ; CHECK-AIX-64-P9-NEXT: std r0, 128(r1) -; CHECK-AIX-64-P9-NEXT: bl .memcmp[PR] +; CHECK-AIX-64-P9-NEXT: bl .___memcmp64[PR] ; CHECK-AIX-64-P9-NEXT: nop ; CHECK-AIX-64-P9-NEXT: addi r1, r1, 112 ; CHECK-AIX-64-P9-NEXT: ld r0, 16(r1) diff --git a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll index 232014d..a9503f7 100644 --- a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll +++ b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll @@ -2,22 +2,87 @@ ; Verify whether the generated assembly for the following function includes the mtvsrbmi instruction. ; vector unsigned char v00FF() ; { -; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 }; -; return x; +; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 }; +; return x; +; } +; vector unsigned short short00FF() +; { +; vector unsigned short x = { 0xFF, 0,0,0, 0,0,0,0}; +; return x; +; } +; vector unsigned int int00FF() +; { +; vector unsigned int x = { 0xFF, 0,0,0}; +; return x; +; } +; vector unsigned long long longlong00FF() +; { +; vector unsigned long long x = { 0xFF, 0}; +; return x; ; } ; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc-ibm-aix -mcpu=pwr10 -verify-machineinstrs \ -; RUN: | FileCheck %s --check-prefix=CHECK +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-BE + +; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr10 -verify-machineinstrs \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-LE + +; CHECK-NOT: .byte 255 +; CHECK-NOT: .byte 0 define dso_local noundef range(i8 -1, 1) <16 x i8> @_Z5v00FFv() { -; CHECK-NOT: L..CPI0_0: -; CHECK-NOT: .byte 255 # 0xff -; CHECK-NOT: .byte 0 # 0x0 - -; CHECK-LABEL: _Z5v00FFv: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: mtvsrbmi v2, 1 -; CHECK-NEXT: blr +; CHECK-BE-LABEL: _Z5v00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 32768 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z5v00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr + entry: ret <16 x i8> <i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0> } + +define dso_local noundef range(i16 0, 256) <8 x i16> @_Z9short00FFv() { +; CHECK-BE-LABEL: _Z9short00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 16384 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z9short00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <8 x i16> <i16 255, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0> +} + +define dso_local noundef range(i32 0, 256) <4 x i32> @_Z7int00FFv() { +; CHECK-BE-LABEL: _Z7int00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 4096 +; 
CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z7int00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <4 x i32> <i32 255, i32 0, i32 0, i32 0> +} + +define dso_local noundef range(i64 0, 256) <2 x i64> @_Z12longlong00FFv() { +; CHECK-BE-LABEL: _Z12longlong00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 256 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z12longlong00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <2 x i64> <i64 255, i64 0> +} diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll index b94665b..fb53921 100644 --- a/llvm/test/CodeGen/RISCV/features-info.ll +++ b/llvm/test/CodeGen/RISCV/features-info.ll @@ -6,13 +6,21 @@ ; CHECK-NEXT: 32bit - Implements RV32. ; CHECK-NEXT: 64bit - Implements RV64. ; CHECK-NEXT: a - 'A' (Atomic Instructions). +; CHECK-NEXT: add-load-fusion - Enable ADD(.UW) + load macrofusion. +; CHECK-NEXT: addi-load-fusion - Enable ADDI + load macrofusion. ; CHECK-NEXT: andes45 - Andes 45-Series processors. ; CHECK-NEXT: auipc-addi-fusion - Enable AUIPC+ADDI macrofusion. +; CHECK-NEXT: auipc-load-fusion - Enable AUIPC + load macrofusion. ; CHECK-NEXT: b - 'B' (the collection of the Zba, Zbb, Zbs extensions). +; CHECK-NEXT: bfext-fusion - Enable SLLI+SRLI (bitfield extract) macrofusion. ; CHECK-NEXT: c - 'C' (Compressed Instructions). ; CHECK-NEXT: conditional-cmv-fusion - Enable branch+c.mv fusion. ; CHECK-NEXT: d - 'D' (Double-Precision Floating-Point). ; CHECK-NEXT: disable-latency-sched-heuristic - Disable latency scheduling heuristic. +; CHECK-NEXT: disable-misched-load-clustering - Disable load clustering in the machine scheduler. +; CHECK-NEXT: disable-misched-store-clustering - Disable store clustering in the machine scheduler. +; CHECK-NEXT: disable-postmisched-load-clustering - Disable PostRA load clustering in the machine scheduler. +; CHECK-NEXT: disable-postmisched-store-clustering - Disable PostRA store clustering in the machine scheduler. ; CHECK-NEXT: dlen-factor-2 - Vector unit DLEN(data path width) is half of VLEN. ; CHECK-NEXT: e - 'E' (Embedded Instruction Set with 16 GPRs). ; CHECK-NEXT: exact-asm - Enable Exact Assembly (Disables Compression and Relaxation). @@ -58,6 +66,7 @@ ; CHECK-NEXT: ld-add-fusion - Enable LD+ADD macrofusion. ; CHECK-NEXT: log-vrgather - Has vrgather.vv with LMUL*log2(LMUL) latency ; CHECK-NEXT: lui-addi-fusion - Enable LUI+ADDI macro fusion. +; CHECK-NEXT: lui-load-fusion - Enable LUI + load macrofusion. ; CHECK-NEXT: m - 'M' (Integer Multiplication and Division). ; CHECK-NEXT: mips-p8700 - MIPS p8700 processor. ; CHECK-NEXT: no-default-unroll - Disable default unroll preference.. @@ -130,6 +139,7 @@ ; CHECK-NEXT: shvsatpa - 'Shvsatpa' (vsatp supports all modes supported by satp). ; CHECK-NEXT: shvstvala - 'Shvstvala' (vstval provides all needed values). ; CHECK-NEXT: shvstvecd - 'Shvstvecd' (vstvec supports Direct mode). +; CHECK-NEXT: shxadd-load-fusion - Enable SH(1|2|3)ADD(.UW) + load macrofusion. ; CHECK-NEXT: sifive7 - SiFive 7-Series processors. ; CHECK-NEXT: smaia - 'Smaia' (Advanced Interrupt Architecture Machine Level). ; CHECK-NEXT: smcdeleg - 'Smcdeleg' (Counter Delegation Machine Level). 
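For reference, the clustering and fusion entries added to this feature listing are ordinary subtarget features, so they can be toggled straight from an llc command line in the same way the RISC-V scheduling tests later in this change do. A minimal sketch, assuming an asserts-enabled build of llc (required for -debug-only) and a placeholder input file named input.ll:

    llc -mtriple=riscv64 -verify-misched \
        -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
        -debug-only=machine-scheduler -o - input.ll

Every flag here is taken from the RUN lines of the clustering tests below; only the input file name is invented for illustration.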
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll index facb544..0c152e6 100644 --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -2262,12 +2262,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IZHINX-NEXT: addi a2, a3, -1 ; RV32IZHINX-NEXT: .LBB10_4: # %start ; RV32IZHINX-NEXT: feq.s a3, s0, s0 -; RV32IZHINX-NEXT: neg a4, a1 -; RV32IZHINX-NEXT: neg a1, s1 +; RV32IZHINX-NEXT: neg a4, s1 +; RV32IZHINX-NEXT: neg a5, a1 ; RV32IZHINX-NEXT: neg a3, a3 -; RV32IZHINX-NEXT: and a0, a1, a0 +; RV32IZHINX-NEXT: and a0, a4, a0 ; RV32IZHINX-NEXT: and a1, a3, a2 -; RV32IZHINX-NEXT: or a0, a4, a0 +; RV32IZHINX-NEXT: or a0, a5, a0 ; RV32IZHINX-NEXT: and a0, a3, a0 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2309,12 +2309,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IZDINXZHINX-NEXT: addi a2, a3, -1 ; RV32IZDINXZHINX-NEXT: .LBB10_4: # %start ; RV32IZDINXZHINX-NEXT: feq.s a3, s0, s0 -; RV32IZDINXZHINX-NEXT: neg a4, a1 -; RV32IZDINXZHINX-NEXT: neg a1, s1 +; RV32IZDINXZHINX-NEXT: neg a4, s1 +; RV32IZDINXZHINX-NEXT: neg a5, a1 ; RV32IZDINXZHINX-NEXT: neg a3, a3 -; RV32IZDINXZHINX-NEXT: and a0, a1, a0 +; RV32IZDINXZHINX-NEXT: and a0, a4, a0 ; RV32IZDINXZHINX-NEXT: and a1, a3, a2 -; RV32IZDINXZHINX-NEXT: or a0, a4, a0 +; RV32IZDINXZHINX-NEXT: or a0, a5, a0 ; RV32IZDINXZHINX-NEXT: and a0, a3, a0 ; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2653,12 +2653,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; CHECK32-IZHINXMIN-NEXT: addi a2, a3, -1 ; CHECK32-IZHINXMIN-NEXT: .LBB10_4: # %start ; CHECK32-IZHINXMIN-NEXT: feq.s a3, s0, s0 -; CHECK32-IZHINXMIN-NEXT: neg a4, a1 -; CHECK32-IZHINXMIN-NEXT: neg a1, s1 +; CHECK32-IZHINXMIN-NEXT: neg a4, s1 +; CHECK32-IZHINXMIN-NEXT: neg a5, a1 ; CHECK32-IZHINXMIN-NEXT: neg a3, a3 -; CHECK32-IZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZHINXMIN-NEXT: and a0, a4, a0 ; CHECK32-IZHINXMIN-NEXT: and a1, a3, a2 -; CHECK32-IZHINXMIN-NEXT: or a0, a4, a0 +; CHECK32-IZHINXMIN-NEXT: or a0, a5, a0 ; CHECK32-IZHINXMIN-NEXT: and a0, a3, a0 ; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2701,12 +2701,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; CHECK32-IZDINXZHINXMIN-NEXT: addi a2, a3, -1 ; CHECK32-IZDINXZHINXMIN-NEXT: .LBB10_4: # %start ; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a3, s0, s0 -; CHECK32-IZDINXZHINXMIN-NEXT: neg a4, a1 -; CHECK32-IZDINXZHINXMIN-NEXT: neg a1, s1 +; CHECK32-IZDINXZHINXMIN-NEXT: neg a4, s1 +; CHECK32-IZDINXZHINXMIN-NEXT: neg a5, a1 ; CHECK32-IZDINXZHINXMIN-NEXT: neg a3, a3 -; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a4, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: and a1, a3, a2 -; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a4, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a5, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a3, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2972,18 +2972,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; RV32IZHINX-NEXT: fcvt.s.h a0, a0 -; RV32IZHINX-NEXT: lui a1, 391168 -; RV32IZHINX-NEXT: addi a1, a1, -1 -; 
RV32IZHINX-NEXT: fle.s a2, zero, a0 -; RV32IZHINX-NEXT: flt.s a1, a1, a0 -; RV32IZHINX-NEXT: neg s0, a1 -; RV32IZHINX-NEXT: neg s1, a2 +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 +; RV32IZHINX-NEXT: mv a0, s0 ; RV32IZHINX-NEXT: call __fixunssfdi ; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: lui a2, 391168 ; RV32IZHINX-NEXT: and a1, s1, a1 -; RV32IZHINX-NEXT: or a0, s0, a0 -; RV32IZHINX-NEXT: or a1, s0, a1 +; RV32IZHINX-NEXT: addi a2, a2, -1 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: or a1, a2, a1 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -3005,18 +3006,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZDINXZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IZDINXZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 -; RV32IZDINXZHINX-NEXT: lui a1, 391168 -; RV32IZDINXZHINX-NEXT: addi a1, a1, -1 -; RV32IZDINXZHINX-NEXT: fle.s a2, zero, a0 -; RV32IZDINXZHINX-NEXT: flt.s a1, a1, a0 -; RV32IZDINXZHINX-NEXT: neg s0, a1 -; RV32IZDINXZHINX-NEXT: neg s1, a2 +; RV32IZDINXZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZDINXZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZDINXZHINX-NEXT: neg s1, a0 +; RV32IZDINXZHINX-NEXT: mv a0, s0 ; RV32IZDINXZHINX-NEXT: call __fixunssfdi ; RV32IZDINXZHINX-NEXT: and a0, s1, a0 +; RV32IZDINXZHINX-NEXT: lui a2, 391168 ; RV32IZDINXZHINX-NEXT: and a1, s1, a1 -; RV32IZDINXZHINX-NEXT: or a0, s0, a0 -; RV32IZDINXZHINX-NEXT: or a1, s0, a1 +; RV32IZDINXZHINX-NEXT: addi a2, a2, -1 +; RV32IZDINXZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZDINXZHINX-NEXT: neg a2, a2 +; RV32IZDINXZHINX-NEXT: or a0, a2, a0 +; RV32IZDINXZHINX-NEXT: or a1, a2, a1 ; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZDINXZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -3217,18 +3219,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; CHECK32-IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; CHECK32-IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 -; CHECK32-IZHINXMIN-NEXT: lui a1, 391168 -; CHECK32-IZHINXMIN-NEXT: addi a1, a1, -1 -; CHECK32-IZHINXMIN-NEXT: fle.s a2, zero, a0 -; CHECK32-IZHINXMIN-NEXT: flt.s a1, a1, a0 -; CHECK32-IZHINXMIN-NEXT: neg s0, a1 -; CHECK32-IZHINXMIN-NEXT: neg s1, a2 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; CHECK32-IZHINXMIN-NEXT: fle.s a0, zero, s0 +; CHECK32-IZHINXMIN-NEXT: neg s1, a0 +; CHECK32-IZHINXMIN-NEXT: mv a0, s0 ; CHECK32-IZHINXMIN-NEXT: call __fixunssfdi ; CHECK32-IZHINXMIN-NEXT: and a0, s1, a0 +; CHECK32-IZHINXMIN-NEXT: lui a2, 391168 ; CHECK32-IZHINXMIN-NEXT: and a1, s1, a1 -; CHECK32-IZHINXMIN-NEXT: or a0, s0, a0 -; CHECK32-IZHINXMIN-NEXT: or a1, s0, a1 +; CHECK32-IZHINXMIN-NEXT: addi a2, a2, -1 +; CHECK32-IZHINXMIN-NEXT: flt.s a2, a2, s0 +; CHECK32-IZHINXMIN-NEXT: neg a2, a2 +; CHECK32-IZHINXMIN-NEXT: or a0, a2, a0 +; CHECK32-IZHINXMIN-NEXT: or a1, a2, a1 ; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; CHECK32-IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -3251,18 +3254,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; 
CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; CHECK32-IZDINXZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; CHECK32-IZDINXZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, 391168 -; CHECK32-IZDINXZHINXMIN-NEXT: addi a1, a1, -1 -; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a2, zero, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a1, a1, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: neg s0, a1 -; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h s0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a0, zero, s0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: mv a0, s0 ; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunssfdi ; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, 391168 ; CHECK32-IZDINXZHINXMIN-NEXT: and a1, s1, a1 -; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s0, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: or a1, s0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: addi a2, a2, -1 +; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a2, a2, s0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg a2, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a2, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: or a1, a2, a1 ; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; CHECK32-IZDINXZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/macro-fusions.mir b/llvm/test/CodeGen/RISCV/macro-fusions.mir index 1346414..ae5b52d 100644 --- a/llvm/test/CodeGen/RISCV/macro-fusions.mir +++ b/llvm/test/CodeGen/RISCV/macro-fusions.mir @@ -2,7 +2,12 @@ # RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \ # RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \ # RUN: -mattr=+lui-addi-fusion,+auipc-addi-fusion,+zexth-fusion,+zextw-fusion,+shifted-zextw-fusion,+ld-add-fusion \ +# RUN: -mattr=+add-load-fusion,+auipc-load-fusion,+lui-load-fusion,+addi-load-fusion \ +# RUN: -mattr=+zba,+shxadd-load-fusion \ # RUN: | FileCheck %s +# RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \ +# RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \ +# RUN: -mattr=+zba,+bfext-fusion | FileCheck --check-prefixes=CHECK-BFEXT %s # CHECK: lui_addi:%bb.0 # CHECK: Macro fuse: {{.*}}LUI - ADDI @@ -174,3 +179,1374 @@ body: | $x11 = COPY %5 PseudoRET ... + +# CHECK: add_lb +# CHECK: Macro fuse: {{.*}}ADD - LB +--- +name: add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lh +# CHECK: Macro fuse: {{.*}}ADD - LH +--- +name: add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lw +# CHECK: Macro fuse: {{.*}}ADD - LW +--- +name: add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: add_lbu +# CHECK: Macro fuse: {{.*}}ADD - LBU +--- +name: add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lhu +# CHECK: Macro fuse: {{.*}}ADD - LHU +--- +name: add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lwu +# CHECK: Macro fuse: {{.*}}ADD - LWU +--- +name: add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: auipc_lb +# CHECK: Macro fuse: {{.*}}AUIPC - LB +--- +name: auipc_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LB %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lh +# CHECK: Macro fuse: {{.*}}AUIPC - LH +--- +name: auipc_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LH %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lw +# CHECK: Macro fuse: {{.*}}AUIPC - LW +--- +name: auipc_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LW %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_ld +# CHECK: Macro fuse: {{.*}}AUIPC - LD +--- +name: auipc_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LD %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lbu +# CHECK: Macro fuse: {{.*}}AUIPC - LBU +--- +name: auipc_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LBU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lhu +# CHECK: Macro fuse: {{.*}}AUIPC - LHU +--- +name: auipc_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LHU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lwu +# CHECK: Macro fuse: {{.*}}AUIPC - LWU +--- +name: auipc_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LWU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lb +# CHECK: Macro fuse: {{.*}}LUI - LB +--- +name: lui_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LB %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lh +# CHECK: Macro fuse: {{.*}}LUI - LH +--- +name: lui_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LH %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... 
+ +# CHECK: lui_lw +# CHECK: Macro fuse: {{.*}}LUI - LW +--- +name: lui_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LW %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_ld +# CHECK: Macro fuse: {{.*}}LUI - LD +--- +name: lui_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LD %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lbu +# CHECK: Macro fuse: {{.*}}LUI - LBU +--- +name: lui_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LBU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lhu +# CHECK: Macro fuse: {{.*}}LUI - LHU +--- +name: lui_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LHU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lwu +# CHECK: Macro fuse: {{.*}}LUI - LWU +--- +name: lui_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LWU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK-BFEXT: bitfield_extract +# CHECK-BFEXT: Macro fuse: {{.*}}SLLI - SRLI +--- +name: bitfield_extract +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = SLLI %1, 31 + %3:gpr = XORI %1, 3 + %4:gpr = SRLI %2, 48 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: addi_lb +# CHECK: Macro fuse: {{.*}}ADDI - LB +--- +name: addi_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lh +# CHECK: Macro fuse: {{.*}}ADDI - LH +--- +name: addi_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lw +# CHECK: Macro fuse: {{.*}}ADDI - LW +--- +name: addi_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_ld +# CHECK: Macro fuse: {{.*}}ADDI - LD +--- +name: addi_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lbu +# CHECK: Macro fuse: {{.*}}ADDI - LBU +--- +name: addi_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lhu +# CHECK: Macro fuse: {{.*}}ADDI - LHU +--- +name: addi_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: addi_lwu +# CHECK: Macro fuse: {{.*}}ADDI - LWU +--- +name: addi_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lb +# CHECK: Macro fuse: {{.*}}ADD_UW - LB +--- +name: adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lh +# CHECK: Macro fuse: {{.*}}ADD_UW - LH +--- +name: adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lw +# CHECK: Macro fuse: {{.*}}ADD_UW - LW +--- +name: adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_ld +# CHECK: Macro fuse: {{.*}}ADD_UW - LD +--- +name: adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lbu +# CHECK: Macro fuse: {{.*}}ADD_UW - LBU +--- +name: adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lhu +# CHECK: Macro fuse: {{.*}}ADD_UW - LHU +--- +name: adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lwu +# CHECK: Macro fuse: {{.*}}ADD_UW - LWU +--- +name: adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lb +# CHECK: Macro fuse: {{.*}}SH1ADD - LB +--- +name: sh1add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lb +# CHECK: Macro fuse: {{.*}}SH2ADD - LB +--- +name: sh2add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lb +# CHECK: Macro fuse: {{.*}}SH3ADD - LB +--- +name: sh3add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh1add_lh +# CHECK: Macro fuse: {{.*}}SH1ADD - LH +--- +name: sh1add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lh +# CHECK: Macro fuse: {{.*}}SH2ADD - LH +--- +name: sh2add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lh +# CHECK: Macro fuse: {{.*}}SH3ADD - LH +--- +name: sh3add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lw +# CHECK: Macro fuse: {{.*}}SH1ADD - LW +--- +name: sh1add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lw +# CHECK: Macro fuse: {{.*}}SH2ADD - LW +--- +name: sh2add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lw +# CHECK: Macro fuse: {{.*}}SH3ADD - LW +--- +name: sh3add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_ld +# CHECK: Macro fuse: {{.*}}SH1ADD - LD +--- +name: sh1add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_ld +# CHECK: Macro fuse: {{.*}}SH2ADD - LD +--- +name: sh2add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_ld +# CHECK: Macro fuse: {{.*}}SH3ADD - LD +--- +name: sh3add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lbu +# CHECK: Macro fuse: {{.*}}SH1ADD - LBU +--- +name: sh1add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lbu +# CHECK: Macro fuse: {{.*}}SH2ADD - LBU +--- +name: sh2add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh3add_lbu +# CHECK: Macro fuse: {{.*}}SH3ADD - LBU +--- +name: sh3add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lhu +# CHECK: Macro fuse: {{.*}}SH1ADD - LHU +--- +name: sh1add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lhu +# CHECK: Macro fuse: {{.*}}SH2ADD - LHU +--- +name: sh2add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lhu +# CHECK: Macro fuse: {{.*}}SH3ADD - LHU +--- +name: sh3add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lwu +# CHECK: Macro fuse: {{.*}}SH1ADD - LWU +--- +name: sh1add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lwu +# CHECK: Macro fuse: {{.*}}SH2ADD - LWU +--- +name: sh2add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lwu +# CHECK: Macro fuse: {{.*}}SH3ADD - LWU +--- +name: sh3add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lb +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LB +--- +name: sh1adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lb +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LB +--- +name: sh2adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lb +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LB +--- +name: sh3adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lh +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LH +--- +name: sh1adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh2adduw_lh +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LH +--- +name: sh2adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lh +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LH +--- +name: sh3adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lw +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LW +--- +name: sh1adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lw +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LW +--- +name: sh2adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lw +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LW +--- +name: sh3adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_ld +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LD +--- +name: sh1adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_ld +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LD +--- +name: sh2adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_ld +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LD +--- +name: sh3adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lbu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LBU +--- +name: sh1adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lbu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LBU +--- +name: sh2adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh3adduw_lbu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LBU +--- +name: sh3adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lhu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LHU +--- +name: sh1adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lhu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LHU +--- +name: sh2adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lhu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LHU +--- +name: sh3adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lwu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LWU +--- +name: sh1adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lwu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LWU +--- +name: sh2adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lwu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LWU +--- +name: sh3adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
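All of the MIR cases added above share one scaffold: an address-forming instruction (ADD, ADDI, ADD_UW, AUIPC, LUI, or one of the SH1/2/3ADD(_UW) forms), an independent XORI, and a load that consumes the computed address; each case passes when -debug-only=machine-scheduler prints the matching "Macro fuse:" line. Reduced to a skeleton, with the SH3ADD_UW + LWU pair from the final case standing in for the whole family (not a complete .mir file, just the shape shared by every test):

    # CHECK: Macro fuse: {{.*}}SH3ADD_UW - LWU
    %3:gpr = SH3ADD_UW %1, %2
    %4:gpr = XORI %2, 3
    %5:gpr = LWU %3, 8

The bitfield-extract case is the one exception to this pattern: it pairs SLLI with SRLI and is exercised through the separate CHECK-BFEXT RUN line (+zba,+bfext-fusion) rather than the main prefix.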
diff --git a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll index 160f0ae..abdc1ba 100644 --- a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll +++ b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll @@ -1,17 +1,42 @@ ; REQUIRES: asserts -; RUN: llc -mtriple=riscv32 -verify-misched -riscv-misched-load-store-clustering=false \ +; +; Disable all misched clustering +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=NOCLUSTER %s -; RUN: llc -mtriple=riscv64 -verify-misched -riscv-misched-load-store-clustering=false \ +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; +; ST misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; +; LD misched clustering only ; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=LDCLUSTER %s ; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=LDCLUSTER %s - +; +; Default misched cluster settings (i.e. 
both LD and ST clustering) +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s define i32 @load_clustering_1(ptr nocapture %p) { ; NOCLUSTER: ********** MI Scheduling ********** @@ -22,6 +47,14 @@ define i32 @load_clustering_1(ptr nocapture %p) { ; NOCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 ; NOCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 ; +; STCLUSTER: ********** MI Scheduling ********** +; STCLUSTER-LABEL: load_clustering_1:%bb.0 +; STCLUSTER: *** Final schedule for %bb.0 *** +; STCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 +; STCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 +; STCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 +; STCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 +; ; LDCLUSTER: ********** MI Scheduling ********** ; LDCLUSTER-LABEL: load_clustering_1:%bb.0 ; LDCLUSTER: *** Final schedule for %bb.0 *** @@ -29,6 +62,14 @@ define i32 @load_clustering_1(ptr nocapture %p) { ; LDCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 ; LDCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 ; LDCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 +; +; DEFAULTCLUSTER: ********** MI Scheduling ********** +; DEFAULTCLUSTER-LABEL: load_clustering_1:%bb.0 +; DEFAULTCLUSTER: *** Final schedule for %bb.0 *** +; DEFAULTCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 +; DEFAULTCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 +; DEFAULTCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 +; DEFAULTCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 entry: %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3 %val0 = load i32, ptr %arrayidx0 diff --git a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir index 21398d3..01960f9 100644 --- a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir +++ b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir @@ -1,10 +1,12 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -verify-misched -enable-post-misched=false \ -# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \ +# RUN: -mattr=+disable-postmisched-load-clustering \ +# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \ # RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \ # RUN: | FileCheck -check-prefix=NOPOSTMISCHED %s # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \ -# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \ +# RUN: -mattr=+disable-postmisched-load-clustering \ +# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \ # RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \ # RUN: | FileCheck -check-prefix=NOCLUSTER %s # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \ diff --git a/llvm/test/CodeGen/RISCV/misched-store-clustering.ll b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll new file mode 100644 index 0000000..02e853d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll @@ -0,0 +1,83 @@ +; REQUIRES: asserts +; +; Disable all misched clustering +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: 
-mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; +; ST misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; +; LD misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=LDCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=LDCLUSTER %s +; +; Default misched cluster settings (i.e. both LD and ST clustering) +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s + +define i32 @store_clustering_1(ptr nocapture %p, i32 %v) { +; NOCLUSTER: ********** MI Scheduling ********** +; NOCLUSTER-LABEL: store_clustering_1:%bb.0 +; NOCLUSTER: *** Final schedule for %bb.0 *** +; NOCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; NOCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; NOCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; NOCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; STCLUSTER: ********** MI Scheduling ********** +; STCLUSTER-LABEL: store_clustering_1:%bb.0 +; STCLUSTER: *** Final schedule for %bb.0 *** +; STCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; STCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; STCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; STCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; LDCLUSTER: ********** MI Scheduling ********** +; LDCLUSTER-LABEL: store_clustering_1:%bb.0 +; LDCLUSTER: *** Final schedule for %bb.0 *** +; LDCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; LDCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; LDCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; LDCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; DEFAULTCLUSTER: ********** MI Scheduling ********** +; DEFAULTCLUSTER-LABEL: store_clustering_1:%bb.0 +; DEFAULTCLUSTER: *** Final schedule for %bb.0 *** +; DEFAULTCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; DEFAULTCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; DEFAULTCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; DEFAULTCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: 
(store (s32) into %ir.arrayidx3) +entry: + %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3 + store i32 %v, ptr %arrayidx0 + %arrayidx1 = getelementptr inbounds i32, ptr %p, i32 2 + store i32 %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds i32, ptr %p, i32 1 + store i32 %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds i32, ptr %p, i32 4 + store i32 %v, ptr %arrayidx3 + ret i32 %v +} diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll index 7ebbd78..42d326e 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll @@ -350,10 +350,43 @@ define i32 @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, ret i32 %j } +define i32 @pack_lo_packh_hi_packh_2(i8 %0, i8 %1, i8 %2, i8 %3) nounwind { +; RV32I-LABEL: pack_lo_packh_hi_packh_2: +; RV32I: # %bb.0: +; RV32I-NEXT: zext.b a0, a0 +; RV32I-NEXT: zext.b a1, a1 +; RV32I-NEXT: zext.b a2, a2 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: slli a1, a1, 8 +; RV32I-NEXT: slli a2, a2, 16 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: or a2, a2, a3 +; RV32I-NEXT: or a0, a0, a2 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_packh_hi_packh_2: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a0, a0, a1 +; RV32ZBKB-NEXT: packh a1, a2, a3 +; RV32ZBKB-NEXT: pack a0, a0, a1 +; RV32ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %h, %f + %j = or i32 %i, %g + ret i32 %j +} + define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2) nounwind { ; RV32I-LABEL: pack_lo_zext_hi_packh: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a1, a2, 16 +; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: slli a2, a2, 24 ; RV32I-NEXT: or a1, a2, a1 ; RV32I-NEXT: or a0, a1, a0 @@ -361,14 +394,14 @@ define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2) ; ; RV32ZBKB-LABEL: pack_lo_zext_hi_packh: ; RV32ZBKB: # %bb.0: -; RV32ZBKB-NEXT: packh a1, a2, a2 +; RV32ZBKB-NEXT: packh a1, a1, a2 ; RV32ZBKB-NEXT: pack a0, a0, a1 ; RV32ZBKB-NEXT: ret %a = zext i16 %0 to i32 %b = zext i8 %1 to i32 %c = zext i8 %2 to i32 %d = shl i32 %c, 8 - %e = or i32 %c, %d + %e = or i32 %b, %d %f = shl i32 %e, 16 %g = or i32 %f, %a ret i32 %g @@ -379,7 +412,7 @@ define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2) define i32 @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2) nounwind { ; RV32I-LABEL: pack_lo_noext_hi_packh: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a1, a2, 16 +; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: slli a2, a2, 24 ; RV32I-NEXT: or a1, a2, a1 ; RV32I-NEXT: or a0, a1, a0 @@ -387,14 +420,40 @@ define i32 @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2) nounwin ; ; RV32ZBKB-LABEL: pack_lo_noext_hi_packh: ; RV32ZBKB: # %bb.0: -; RV32ZBKB-NEXT: packh a1, a2, a2 +; RV32ZBKB-NEXT: packh a1, a1, a2 +; RV32ZBKB-NEXT: slli a1, a1, 16 +; RV32ZBKB-NEXT: or a0, a1, a0 +; RV32ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + ret i32 %g +} + +; Make sure we can match packh+slli without having the input bytes zero extended. 
+define i32 @pack_lo_noext_hi_packh_nozeroext(i32 %a, i8 %1, i8 %2) nounwind { +; RV32I-LABEL: pack_lo_noext_hi_packh_nozeroext: +; RV32I: # %bb.0: +; RV32I-NEXT: zext.b a1, a1 +; RV32I-NEXT: slli a2, a2, 24 +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: or a0, a2, a0 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_noext_hi_packh_nozeroext: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a1, a1, a2 ; RV32ZBKB-NEXT: slli a1, a1, 16 ; RV32ZBKB-NEXT: or a0, a1, a0 ; RV32ZBKB-NEXT: ret %b = zext i8 %1 to i32 %c = zext i8 %2 to i32 %d = shl i32 %c, 8 - %e = or i32 %c, %d + %e = or i32 %b, %d %f = shl i32 %e, 16 %g = or i32 %f, %a ret i32 %g diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll index 57061e1..f89d1abf 100644 --- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll @@ -253,8 +253,8 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind { ; RV64IZHINX-NEXT: srli a1, a2, 1 ; RV64IZHINX-NEXT: .LBB4_4: ; RV64IZHINX-NEXT: feq.s a2, s0, s0 -; RV64IZHINX-NEXT: neg a3, a3 ; RV64IZHINX-NEXT: neg a4, s1 +; RV64IZHINX-NEXT: neg a3, a3 ; RV64IZHINX-NEXT: neg a2, a2 ; RV64IZHINX-NEXT: and a0, a4, a0 ; RV64IZHINX-NEXT: and a1, a2, a1 @@ -334,18 +334,19 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind { ; RV64IZHINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; RV64IZHINX-NEXT: fcvt.s.h a0, a0 -; RV64IZHINX-NEXT: lui a1, 522240 -; RV64IZHINX-NEXT: addi a1, a1, -1 -; RV64IZHINX-NEXT: fle.s a2, zero, a0 -; RV64IZHINX-NEXT: flt.s a1, a1, a0 -; RV64IZHINX-NEXT: neg s0, a1 -; RV64IZHINX-NEXT: neg s1, a2 +; RV64IZHINX-NEXT: fcvt.s.h s0, a0 +; RV64IZHINX-NEXT: fle.s a0, zero, s0 +; RV64IZHINX-NEXT: neg s1, a0 +; RV64IZHINX-NEXT: mv a0, s0 ; RV64IZHINX-NEXT: call __fixunssfti ; RV64IZHINX-NEXT: and a0, s1, a0 +; RV64IZHINX-NEXT: lui a2, 522240 ; RV64IZHINX-NEXT: and a1, s1, a1 -; RV64IZHINX-NEXT: or a0, s0, a0 -; RV64IZHINX-NEXT: or a1, s0, a1 +; RV64IZHINX-NEXT: addi a2, a2, -1 +; RV64IZHINX-NEXT: flt.s a2, a2, s0 +; RV64IZHINX-NEXT: neg a2, a2 +; RV64IZHINX-NEXT: or a0, a2, a0 +; RV64IZHINX-NEXT: or a1, a2, a1 ; RV64IZHINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64IZHINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64IZHINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll index 818ea72..4537d18 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll @@ -392,3 +392,247 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind { %1 = zext i16 %a to i64 ret i64 %1 } + +define void @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: slli a3, a3, 24 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a2, a2, a3 +; RV64I-NEXT: or a0, a0, a2 +; RV64I-NEXT: sw a0, 0(a4) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a2, a3 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a4) +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or 
i32 %a, %e + %i = or i32 %h, %f + %j = or i32 %i, %g + store i32 %j, ptr %p + ret void +} + +define void @pack_lo_packh_hi_packh_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh_2: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: slli a3, a3, 24 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a2, a2, a3 +; RV64I-NEXT: or a0, a2, a0 +; RV64I-NEXT: sw a0, 0(a4) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_2: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a3, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a4) +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %g, %h + %j = or i32 %f, %i + store i32 %j, ptr %p + ret void +} + +define void @pack_lo_packh_hi_packh_3(i8 %0, i8 %1, i8 %2, i8 %3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh_3: +; RV64I: # %bb.0: +; RV64I-NEXT: zext.b a0, a0 +; RV64I-NEXT: zext.b a1, a1 +; RV64I-NEXT: zext.b a2, a2 +; RV64I-NEXT: slli a3, a3, 24 +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: or a0, a3, a0 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a0, a2, a0 +; RV64I-NEXT: sw a0, 0(a4) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_3: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a3, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a4) +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %g, %h + %j = or i32 %f, %i + store i32 %j, ptr %p + ret void +} + +define i32 @pack_lo_packh_hi_packh_4(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh_4: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: slliw a3, a3, 24 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a2, a2, a3 +; RV64I-NEXT: or a0, a0, a2 +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_4: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a3, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %h, %f + %j = or i32 %i, %g + ret i32 %j +} + +define void @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_zext_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 16 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: sw a0, 0(a3) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_zext_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a1, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a3) +; RV64ZBKB-NEXT: ret + %a = zext i16 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + store i32 %g, ptr %p + ret void +} + +; Negative test, %a isn't extended so we can't use packw for the outer 
or, but +; we can use packh for the high half. +define void @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_noext_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 16 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: sw a0, 0(a3) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_noext_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a1, a2 +; RV64ZBKB-NEXT: slli a1, a1, 16 +; RV64ZBKB-NEXT: or a0, a1, a0 +; RV64ZBKB-NEXT: sw a0, 0(a3) +; RV64ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + store i32 %g, ptr %p + ret void +} + +; Make sure we can match packh+slli without having the input bytes zero extended. +define void @pack_i32_lo_noext_hi_packh_nozeroext(i32 %a, i8 %1, i8 %2, ptr %p) nounwind { +; RV64I-LABEL: pack_i32_lo_noext_hi_packh_nozeroext: +; RV64I: # %bb.0: +; RV64I-NEXT: zext.b a1, a1 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: slli a1, a1, 16 +; RV64I-NEXT: or a0, a2, a0 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: sw a0, 0(a3) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_i32_lo_noext_hi_packh_nozeroext: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a1, a2 +; RV64ZBKB-NEXT: slli a1, a1, 16 +; RV64ZBKB-NEXT: or a0, a1, a0 +; RV64ZBKB-NEXT: sw a0, 0(a3) +; RV64ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + store i32 %g, ptr %p + ret void +} + +; Make sure we can match packh+slli without having the input bytes zero extended. +define i64 @pack_i64_lo_noext_hi_packh_nozeroext(i64 %a, i8 %1, i8 %2, ptr %p) nounwind { +; RV64I-LABEL: pack_i64_lo_noext_hi_packh_nozeroext: +; RV64I: # %bb.0: +; RV64I-NEXT: zext.b a1, a1 +; RV64I-NEXT: zext.b a2, a2 +; RV64I-NEXT: slli a1, a1, 16 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_i64_lo_noext_hi_packh_nozeroext: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a1, a2 +; RV64ZBKB-NEXT: slli a1, a1, 16 +; RV64ZBKB-NEXT: or a0, a1, a0 +; RV64ZBKB-NEXT: ret + %b = zext i8 %1 to i64 + %c = zext i8 %2 to i64 + %d = shl i64 %c, 8 + %e = or i64 %b, %d + %f = shl i64 %e, 16 + %g = or i64 %f, %a + ret i64 %g +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll new file mode 100644 index 0000000..abf2894 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ssegN-store.ll @@ -0,0 +1,72 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s + +define void @store_factor2(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg2.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor3(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void 
@llvm.riscv.sseg3.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor4(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor4: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg4.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor5(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor5: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg5.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor6(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor6: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg6.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor7(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor7: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg7.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} + +define void @store_factor8(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, i64 %stride) { +; CHECK-LABEL: store_factor8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: ret + call void @llvm.riscv.sseg8.store.mask.v8i8.i64.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, i64 %stride, <8 x i1> splat (i1 true), i64 8) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll index 7990dfc..4c84304 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll @@ -366,8 +366,8 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_ ; RV64X60-NEXT: # => This Inner Loop Header: Depth=2 ; RV64X60-NEXT: vl2r.v v8, (s2) ; RV64X60-NEXT: vl2r.v v10, (s3) -; RV64X60-NEXT: sub s1, s1, t3 ; RV64X60-NEXT: vaaddu.vv v8, v8, v10 +; RV64X60-NEXT: sub s1, s1, t3 ; RV64X60-NEXT: vs2r.v v8, (s4) ; RV64X60-NEXT: add s4, s4, t3 ; RV64X60-NEXT: add s3, s3, t3 diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll index c9c49e8..cb046cd 100644 --- 
a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll +++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll @@ -204,18 +204,16 @@ define i64 @load_i64(ptr %p) { ; RV64IZBKB-NEXT: lbu a2, 5(a0) ; RV64IZBKB-NEXT: lbu a3, 6(a0) ; RV64IZBKB-NEXT: lbu a4, 7(a0) -; RV64IZBKB-NEXT: lbu a5, 0(a0) -; RV64IZBKB-NEXT: lbu a6, 1(a0) -; RV64IZBKB-NEXT: lbu a7, 2(a0) -; RV64IZBKB-NEXT: lbu a0, 3(a0) +; RV64IZBKB-NEXT: lbu a5, 1(a0) +; RV64IZBKB-NEXT: lbu a6, 2(a0) +; RV64IZBKB-NEXT: lbu a7, 3(a0) +; RV64IZBKB-NEXT: lbu a0, 0(a0) +; RV64IZBKB-NEXT: packh a3, a3, a4 ; RV64IZBKB-NEXT: packh a1, a1, a2 -; RV64IZBKB-NEXT: packh a2, a3, a4 -; RV64IZBKB-NEXT: packh a3, a5, a6 -; RV64IZBKB-NEXT: packh a0, a7, a0 -; RV64IZBKB-NEXT: slli a2, a2, 16 -; RV64IZBKB-NEXT: slli a0, a0, 16 -; RV64IZBKB-NEXT: or a1, a2, a1 -; RV64IZBKB-NEXT: or a0, a0, a3 +; RV64IZBKB-NEXT: packh a2, a6, a7 +; RV64IZBKB-NEXT: packh a0, a0, a5 +; RV64IZBKB-NEXT: packw a1, a1, a3 +; RV64IZBKB-NEXT: packw a0, a0, a2 ; RV64IZBKB-NEXT: pack a0, a0, a1 ; RV64IZBKB-NEXT: ret ; diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll new file mode 100644 index 0000000..00e9185 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll @@ -0,0 +1,75 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-vulkan1.3-library %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-vulkan1.3-library %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %} + +@.str = private unnamed_addr constant [2 x i8] c"b\00", align 1 +@.str.2 = private unnamed_addr constant [2 x i8] c"c\00", align 1 +@.str.4 = private unnamed_addr constant [2 x i8] c"d\00", align 1 +@.str.6 = private unnamed_addr constant [2 x i8] c"e\00", align 1 +@.str.8 = private unnamed_addr constant [2 x i8] c"f\00", align 1 +@.str.10 = private unnamed_addr constant [2 x i8] c"g\00", align 1 +@.str.12 = private unnamed_addr constant [2 x i8] c"h\00", align 1 +@.str.14 = private unnamed_addr constant [2 x i8] c"i\00", align 1 + +; CHECK-DAG: OpName [[b:%[0-9]+]] "b" +; CHECK-DAG: OpName [[c:%[0-9]+]] "c" +; CHECK-DAG: OpName [[d:%[0-9]+]] "d" +; CHECK-DAG: OpName [[e:%[0-9]+]] "e" +; CHECK-DAG: OpName [[f:%[0-9]+]] "f" +; CHECK-DAG: OpName [[g:%[0-9]+]] "g" +; CHECK-DAG: OpName [[h:%[0-9]+]] "h" +; CHECK-DAG: OpName [[i:%[0-9]+]] "i" +; CHECK-DAG: OpDecorate [[b]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[b]] Binding 1 +; CHECK-DAG: OpDecorate [[c]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[c]] Binding 0 +; CHECK-DAG: OpDecorate [[d]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[d]] Binding 3 +; CHECK-DAG: OpDecorate [[e]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[e]] Binding 2 +; CHECK-DAG: OpDecorate [[f]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[f]] Binding 1 +; CHECK-DAG: OpDecorate [[g]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[g]] Binding 0 +; CHECK-DAG: OpDecorate [[h]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[h]] Binding 3 +; CHECK-DAG: OpDecorate [[i]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[i]] Binding 2 + + +define void @main() local_unnamed_addr #0 { +entry: + %0 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str) + %1 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull 
@.str.2) + %2 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 1, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.4) + %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.6) + %4 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str.8) + %5 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 2, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.10) + %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.12) + %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.14) + %8 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0) + %9 = load i32, ptr addrspace(11) %8, align 4 + %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0) + %11 = load i32, ptr addrspace(11) %10, align 4 + %add.i = add nsw i32 %11, %9 + %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0) + %13 = load i32, ptr addrspace(11) %12, align 4 + %add4.i = add nsw i32 %add.i, %13 + %14 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0) + %15 = load i32, ptr addrspace(11) %14, align 4 + %add6.i = add nsw i32 %add4.i, %15 + %16 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0) + %17 = load i32, ptr addrspace(11) %16, align 4 + %add8.i = add nsw i32 %add6.i, %17 + %18 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0) + %19 = load i32, ptr addrspace(11) %18, align 4 + %add10.i = add nsw i32 %add8.i, %19 + %20 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0) + %21 = load i32, ptr addrspace(11) %20, align 4 + %add12.i = add nsw i32 %add10.i, %21 + %22 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0) + store i32 %add12.i, ptr addrspace(11) %22, align 4 + ret void +} + + +attributes #0 = { "hlsl.numthreads"="1,1,1" 
"hlsl.shader"="compute" }
\ No newline at end of file diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll index 483d707..3d93eca 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll @@ -17,11 +17,11 @@ ; CL: OpFunction ; CL: %[[#FooVar:]] = OpVariable ; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] -; CL-NEXT: OpLifetimeStart %[[#Casted1]], 72 +; CL-NEXT: OpLifetimeStart %[[#Casted1]], 16 ; CL-NEXT: OpBitcast ; CL-NEXT: OpInBoundsPtrAccessChain ; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]] -; CL-NEXT: OpLifetimeStop %[[#Casted2]], 72 +; CL-NEXT: OpLifetimeStop %[[#Casted2]], 16 ; VK: OpFunction ; VK: %[[#FooVar:]] = OpVariable @@ -29,18 +29,20 @@ ; VK-NEXT: OpReturn define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 - call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel) + call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 - call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel) + call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel) ret void } ; CL: OpFunction ; CL: %[[#BarVar:]] = OpVariable -; CL-NEXT: OpLifetimeStart %[[#BarVar]], 0 +; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] +; CL-NEXT: OpLifetimeStart %[[#Casted1]], 16 ; CL-NEXT: OpBitcast ; CL-NEXT: OpInBoundsPtrAccessChain -; CL-NEXT: OpLifetimeStop %[[#BarVar]], 0 +; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]] +; CL-NEXT: OpLifetimeStop %[[#Casted2]], 16 ; VK: OpFunction ; VK: %[[#BarVar:]] = OpVariable @@ -48,9 +50,9 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) ; VK-NEXT: OpReturn define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) { %RoundedRangeKernel = alloca %tprange, align 8 - call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %RoundedRangeKernel) + call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel) %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8 - call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %RoundedRangeKernel) + call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel) ret void } @@ -66,12 +68,12 @@ define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) ; VK-NEXT: OpReturn define spir_func void @test(ptr noundef align 8 %_arg) { %var = alloca i8, align 8 - call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %var) + call void @llvm.lifetime.start.p0(ptr nonnull %var) %KernelFunc = getelementptr inbounds i8, ptr %var, i64 1 - call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %var) + call void @llvm.lifetime.end.p0(ptr nonnull %var) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/CodeGen/Thumb/scmp.ll b/llvm/test/CodeGen/Thumb/scmp.ll index 661dbe9..c002449 100644 --- a/llvm/test/CodeGen/Thumb/scmp.ll +++ b/llvm/test/CodeGen/Thumb/scmp.ll @@ -1,151 +1,420 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: 
--version 5 -; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s +; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=THUMB1 +; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s -check-prefix=THUMB2 +; RUN: llc -mtriple thumbv8.1m.main-none-eabi -o - %s | FileCheck %s --check-prefix=V81M define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind { -; CHECK-LABEL: scmp_8_8: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_8_8: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: movs r2, #1 +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: cmp r0, r1 +; THUMB1-NEXT: mov r0, r2 +; THUMB1-NEXT: bge .LBB0_3 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ble .LBB0_4 +; THUMB1-NEXT: .LBB0_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; THUMB1-NEXT: .LBB0_3: +; THUMB1-NEXT: mov r0, r3 +; THUMB1-NEXT: bgt .LBB0_2 +; THUMB1-NEXT: .LBB0_4: +; THUMB1-NEXT: mov r2, r3 +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: scmp_8_8: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it gt +; THUMB2-NEXT: movgt r0, #1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_8_8: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, gt +; V81M-NEXT: it lt +; V81M-NEXT: movlt.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.scmp(i8 %x, i8 %y) ret i8 %1 } define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind { -; CHECK-LABEL: scmp_8_16: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_8_16: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: movs r2, #1 +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: cmp r0, r1 +; THUMB1-NEXT: mov r0, r2 +; THUMB1-NEXT: bge .LBB1_3 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ble .LBB1_4 +; THUMB1-NEXT: .LBB1_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; THUMB1-NEXT: .LBB1_3: +; THUMB1-NEXT: mov r0, r3 +; THUMB1-NEXT: bgt .LBB1_2 +; THUMB1-NEXT: .LBB1_4: +; THUMB1-NEXT: mov r2, r3 +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: scmp_8_16: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it gt +; THUMB2-NEXT: movgt r0, #1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_8_16: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, gt +; V81M-NEXT: it lt +; V81M-NEXT: movlt.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.scmp(i16 %x, i16 %y) ret i8 %1 } define i8 @scmp_8_32(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: scmp_8_32: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_8_32: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: movs r2, #1 +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: cmp r0, r1 +; THUMB1-NEXT: mov r0, r2 +; THUMB1-NEXT: bge .LBB2_3 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ble .LBB2_4 +; THUMB1-NEXT: .LBB2_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; THUMB1-NEXT: .LBB2_3: +; 
THUMB1-NEXT: mov r0, r3 +; THUMB1-NEXT: bgt .LBB2_2 +; THUMB1-NEXT: .LBB2_4: +; THUMB1-NEXT: mov r2, r3 +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: scmp_8_32: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it gt +; THUMB2-NEXT: movgt r0, #1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_8_32: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, gt +; V81M-NEXT: it lt +; V81M-NEXT: movlt.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.scmp(i32 %x, i32 %y) ret i8 %1 } define i8 @scmp_8_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scmp_8_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_8_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blt .LBB3_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB3_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blt .LBB3_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB3_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: scmp_8_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_8_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lt +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lt +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: bx lr %1 = call i8 @llvm.scmp(i64 %x, i64 %y) ret i8 %1 } define i8 @scmp_8_128(i128 %x, i128 %y) nounwind { -; CHECK-LABEL: scmp_8_128: -; CHECK: @ %bb.0: -; CHECK-NEXT: push {r4, r5, r6, lr} -; CHECK-NEXT: add.w lr, sp, #16 -; CHECK-NEXT: ldr r4, [sp, #28] -; CHECK-NEXT: movs r5, #0 -; CHECK-NEXT: ldm.w lr, {r9, r12, lr} -; CHECK-NEXT: subs.w r6, r0, r9 -; CHECK-NEXT: sbcs.w r6, r1, r12 -; CHECK-NEXT: sbcs.w r6, r2, lr -; CHECK-NEXT: sbcs.w r6, r3, r4 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 -; CHECK-NEXT: subs.w r0, r9, r0 -; CHECK-NEXT: sbcs.w r0, r12, r1 -; CHECK-NEXT: sbcs.w r0, lr, r2 -; CHECK-NEXT: sbcs.w r0, r4, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 -; CHECK-NEXT: subs r0, r5, r6 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; THUMB1-LABEL: scmp_8_128: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, r7, lr} +; THUMB1-NEXT: push {r4, r5, r6, r7, lr} +; THUMB1-NEXT: .pad #20 +; THUMB1-NEXT: sub sp, #20 +; THUMB1-NEXT: str r3, [sp, #16] @ 4-byte Spill +; THUMB1-NEXT: movs r3, #1 +; THUMB1-NEXT: str r3, [sp] @ 4-byte Spill +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: str r3, [sp, #12] @ 4-byte Spill +; THUMB1-NEXT: ldr r6, [sp, #52] +; 
THUMB1-NEXT: add r7, sp, #40 +; THUMB1-NEXT: ldm r7, {r3, r5, r7} +; THUMB1-NEXT: subs r4, r0, r3 +; THUMB1-NEXT: str r1, [sp, #4] @ 4-byte Spill +; THUMB1-NEXT: mov r4, r1 +; THUMB1-NEXT: ldr r1, [sp] @ 4-byte Reload +; THUMB1-NEXT: sbcs r4, r5 +; THUMB1-NEXT: str r2, [sp, #8] @ 4-byte Spill +; THUMB1-NEXT: mov r4, r2 +; THUMB1-NEXT: sbcs r4, r7 +; THUMB1-NEXT: ldr r4, [sp, #16] @ 4-byte Reload +; THUMB1-NEXT: sbcs r4, r6 +; THUMB1-NEXT: mov r2, r1 +; THUMB1-NEXT: blt .LBB4_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; THUMB1-NEXT: .LBB4_2: +; THUMB1-NEXT: subs r0, r3, r0 +; THUMB1-NEXT: ldr r0, [sp, #4] @ 4-byte Reload +; THUMB1-NEXT: sbcs r5, r0 +; THUMB1-NEXT: ldr r0, [sp, #8] @ 4-byte Reload +; THUMB1-NEXT: sbcs r7, r0 +; THUMB1-NEXT: ldr r0, [sp, #16] @ 4-byte Reload +; THUMB1-NEXT: sbcs r6, r0 +; THUMB1-NEXT: blt .LBB4_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: ldr r1, [sp, #12] @ 4-byte Reload +; THUMB1-NEXT: .LBB4_4: +; THUMB1-NEXT: subs r0, r1, r2 +; THUMB1-NEXT: add sp, #20 +; THUMB1-NEXT: pop {r4, r5, r6, r7, pc} +; +; THUMB2-LABEL: scmp_8_128: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: push {r4, r5, r6, lr} +; THUMB2-NEXT: add.w lr, sp, #16 +; THUMB2-NEXT: ldr r4, [sp, #28] +; THUMB2-NEXT: movs r5, #0 +; THUMB2-NEXT: ldm.w lr, {r9, r12, lr} +; THUMB2-NEXT: subs.w r6, r0, r9 +; THUMB2-NEXT: sbcs.w r6, r1, r12 +; THUMB2-NEXT: sbcs.w r6, r2, lr +; THUMB2-NEXT: sbcs.w r6, r3, r4 +; THUMB2-NEXT: mov.w r6, #0 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt r6, #1 +; THUMB2-NEXT: subs.w r0, r9, r0 +; THUMB2-NEXT: sbcs.w r0, r12, r1 +; THUMB2-NEXT: sbcs.w r0, lr, r2 +; THUMB2-NEXT: sbcs.w r0, r4, r3 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt r5, #1 +; THUMB2-NEXT: subs r0, r5, r6 +; THUMB2-NEXT: pop {r4, r5, r6, pc} +; +; V81M-LABEL: scmp_8_128: +; V81M: @ %bb.0: +; V81M-NEXT: .save {r4, r5, r6, lr} +; V81M-NEXT: push {r4, r5, r6, lr} +; V81M-NEXT: ldrd r5, r4, [sp, #16] +; V81M-NEXT: ldrd lr, r12, [sp, #24] +; V81M-NEXT: subs r6, r0, r5 +; V81M-NEXT: sbcs.w r6, r1, r4 +; V81M-NEXT: sbcs.w r6, r2, lr +; V81M-NEXT: sbcs.w r6, r3, r12 +; V81M-NEXT: cset r6, lt +; V81M-NEXT: subs r0, r5, r0 +; V81M-NEXT: sbcs.w r0, r4, r1 +; V81M-NEXT: sbcs.w r0, lr, r2 +; V81M-NEXT: sbcs.w r0, r12, r3 +; V81M-NEXT: cset r0, lt +; V81M-NEXT: subs r0, r0, r6 +; V81M-NEXT: pop {r4, r5, r6, pc} %1 = call i8 @llvm.scmp(i128 %x, i128 %y) ret i8 %1 } define i32 @scmp_32_32(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: scmp_32_32: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_32_32: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: movs r2, #1 +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: cmp r0, r1 +; THUMB1-NEXT: mov r0, r2 +; THUMB1-NEXT: bge .LBB5_3 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ble .LBB5_4 +; THUMB1-NEXT: .LBB5_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; THUMB1-NEXT: .LBB5_3: +; THUMB1-NEXT: mov r0, r3 +; THUMB1-NEXT: bgt .LBB5_2 +; THUMB1-NEXT: .LBB5_4: +; THUMB1-NEXT: mov r2, r3 +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: scmp_32_32: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it gt +; THUMB2-NEXT: movgt r0, #1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_32_32: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, gt +; V81M-NEXT: 
it lt +; V81M-NEXT: movlt.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i32 @llvm.scmp(i32 %x, i32 %y) ret i32 %1 } define i32 @scmp_32_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scmp_32_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_32_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blt .LBB6_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB6_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blt .LBB6_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB6_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: scmp_32_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_32_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lt +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lt +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: bx lr %1 = call i32 @llvm.scmp(i64 %x, i64 %y) ret i32 %1 } define i64 @scmp_64_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: scmp_64_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: asrs r1, r0, #31 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: scmp_64_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blt .LBB7_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB7_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blt .LBB7_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB7_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: asrs r1, r0, #31 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: scmp_64_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lt +; THUMB2-NEXT: movlt.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: asrs r1, r0, #31 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: scmp_64_64: +; 
V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lt +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lt +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: asrs r1, r0, #31 +; V81M-NEXT: bx lr %1 = call i64 @llvm.scmp(i64 %x, i64 %y) ret i64 %1 } diff --git a/llvm/test/CodeGen/Thumb/ucmp.ll b/llvm/test/CodeGen/Thumb/ucmp.ll index 7e6d0a3..5d0f57e 100644 --- a/llvm/test/CodeGen/Thumb/ucmp.ll +++ b/llvm/test/CodeGen/Thumb/ucmp.ll @@ -1,151 +1,376 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s +; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=THUMB1 +; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s -check-prefix=THUMB2 +; RUN: llc -mtriple thumbv8.1m.main-none-eabi -o - %s | FileCheck %s --check-prefix=V81M define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind { -; CHECK-LABEL: ucmp_8_8: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: it hi -; CHECK-NEXT: movhi r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_8_8: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: subs r2, r0, r1 +; THUMB1-NEXT: sbcs r2, r2 +; THUMB1-NEXT: cmp r1, r0 +; THUMB1-NEXT: sbcs r1, r1 +; THUMB1-NEXT: subs r0, r2, r1 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: ucmp_8_8: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it hi +; THUMB2-NEXT: movhi r0, #1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_8_8: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, hi +; V81M-NEXT: it lo +; V81M-NEXT: movlo.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.ucmp(i8 %x, i8 %y) ret i8 %1 } define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind { -; CHECK-LABEL: ucmp_8_16: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: it hi -; CHECK-NEXT: movhi r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_8_16: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: subs r2, r0, r1 +; THUMB1-NEXT: sbcs r2, r2 +; THUMB1-NEXT: cmp r1, r0 +; THUMB1-NEXT: sbcs r1, r1 +; THUMB1-NEXT: subs r0, r2, r1 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: ucmp_8_16: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it hi +; THUMB2-NEXT: movhi r0, #1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_8_16: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, hi +; V81M-NEXT: it lo +; V81M-NEXT: movlo.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.ucmp(i16 %x, i16 %y) ret i8 %1 } define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: ucmp_8_32: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: it hi -; CHECK-NEXT: movhi r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_8_32: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: subs r2, r0, r1 +; THUMB1-NEXT: sbcs r2, r2 +; THUMB1-NEXT: cmp r1, r0 +; THUMB1-NEXT: sbcs r1, r1 +; THUMB1-NEXT: subs r0, r2, r1 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: ucmp_8_32: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 
+; THUMB2-NEXT: it hi +; THUMB2-NEXT: movhi r0, #1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_8_32: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, hi +; V81M-NEXT: it lo +; V81M-NEXT: movlo.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i8 @llvm.ucmp(i32 %x, i32 %y) ret i8 %1 } define i8 @ucmp_8_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: ucmp_8_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_8_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blo .LBB3_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB3_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blo .LBB3_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB3_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: ucmp_8_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_8_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lo +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lo +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: bx lr %1 = call i8 @llvm.ucmp(i64 %x, i64 %y) ret i8 %1 } define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind { -; CHECK-LABEL: ucmp_8_128: -; CHECK: @ %bb.0: -; CHECK-NEXT: push {r4, r5, r6, lr} -; CHECK-NEXT: add.w lr, sp, #16 -; CHECK-NEXT: ldr r4, [sp, #28] -; CHECK-NEXT: movs r5, #0 -; CHECK-NEXT: ldm.w lr, {r9, r12, lr} -; CHECK-NEXT: subs.w r6, r0, r9 -; CHECK-NEXT: sbcs.w r6, r1, r12 -; CHECK-NEXT: sbcs.w r6, r2, lr -; CHECK-NEXT: sbcs.w r6, r3, r4 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: subs.w r0, r9, r0 -; CHECK-NEXT: sbcs.w r0, r12, r1 -; CHECK-NEXT: sbcs.w r0, lr, r2 -; CHECK-NEXT: sbcs.w r0, r4, r3 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 -; CHECK-NEXT: subs r0, r5, r6 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; THUMB1-LABEL: ucmp_8_128: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, r7, lr} +; THUMB1-NEXT: push {r4, r5, r6, r7, lr} +; THUMB1-NEXT: .pad #20 +; THUMB1-NEXT: sub sp, #20 +; THUMB1-NEXT: str r3, [sp, #16] @ 4-byte Spill +; THUMB1-NEXT: movs r3, #1 +; THUMB1-NEXT: str r3, [sp] @ 4-byte Spill +; THUMB1-NEXT: movs r3, #0 +; THUMB1-NEXT: str r3, [sp, #12] @ 4-byte Spill +; THUMB1-NEXT: ldr r6, [sp, #52] +; THUMB1-NEXT: add r7, sp, #40 +; THUMB1-NEXT: ldm r7, {r3, r5, r7} +; THUMB1-NEXT: subs r4, r0, r3 +; THUMB1-NEXT: str r1, [sp, #4] @ 4-byte Spill +; THUMB1-NEXT: mov r4, r1 +; THUMB1-NEXT: ldr r1, [sp] @ 4-byte Reload +; THUMB1-NEXT: sbcs r4, 
r5 +; THUMB1-NEXT: str r2, [sp, #8] @ 4-byte Spill +; THUMB1-NEXT: mov r4, r2 +; THUMB1-NEXT: sbcs r4, r7 +; THUMB1-NEXT: ldr r4, [sp, #16] @ 4-byte Reload +; THUMB1-NEXT: sbcs r4, r6 +; THUMB1-NEXT: mov r2, r1 +; THUMB1-NEXT: blo .LBB4_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; THUMB1-NEXT: .LBB4_2: +; THUMB1-NEXT: subs r0, r3, r0 +; THUMB1-NEXT: ldr r0, [sp, #4] @ 4-byte Reload +; THUMB1-NEXT: sbcs r5, r0 +; THUMB1-NEXT: ldr r0, [sp, #8] @ 4-byte Reload +; THUMB1-NEXT: sbcs r7, r0 +; THUMB1-NEXT: ldr r0, [sp, #16] @ 4-byte Reload +; THUMB1-NEXT: sbcs r6, r0 +; THUMB1-NEXT: blo .LBB4_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: ldr r1, [sp, #12] @ 4-byte Reload +; THUMB1-NEXT: .LBB4_4: +; THUMB1-NEXT: subs r0, r1, r2 +; THUMB1-NEXT: add sp, #20 +; THUMB1-NEXT: pop {r4, r5, r6, r7, pc} +; +; THUMB2-LABEL: ucmp_8_128: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: push {r4, r5, r6, lr} +; THUMB2-NEXT: add.w lr, sp, #16 +; THUMB2-NEXT: ldr r4, [sp, #28] +; THUMB2-NEXT: movs r5, #0 +; THUMB2-NEXT: ldm.w lr, {r9, r12, lr} +; THUMB2-NEXT: subs.w r6, r0, r9 +; THUMB2-NEXT: sbcs.w r6, r1, r12 +; THUMB2-NEXT: sbcs.w r6, r2, lr +; THUMB2-NEXT: sbcs.w r6, r3, r4 +; THUMB2-NEXT: mov.w r6, #0 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo r6, #1 +; THUMB2-NEXT: subs.w r0, r9, r0 +; THUMB2-NEXT: sbcs.w r0, r12, r1 +; THUMB2-NEXT: sbcs.w r0, lr, r2 +; THUMB2-NEXT: sbcs.w r0, r4, r3 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo r5, #1 +; THUMB2-NEXT: subs r0, r5, r6 +; THUMB2-NEXT: pop {r4, r5, r6, pc} +; +; V81M-LABEL: ucmp_8_128: +; V81M: @ %bb.0: +; V81M-NEXT: .save {r4, r5, r6, lr} +; V81M-NEXT: push {r4, r5, r6, lr} +; V81M-NEXT: ldrd r5, r4, [sp, #16] +; V81M-NEXT: ldrd lr, r12, [sp, #24] +; V81M-NEXT: subs r6, r0, r5 +; V81M-NEXT: sbcs.w r6, r1, r4 +; V81M-NEXT: sbcs.w r6, r2, lr +; V81M-NEXT: sbcs.w r6, r3, r12 +; V81M-NEXT: cset r6, lo +; V81M-NEXT: subs r0, r5, r0 +; V81M-NEXT: sbcs.w r0, r4, r1 +; V81M-NEXT: sbcs.w r0, lr, r2 +; V81M-NEXT: sbcs.w r0, r12, r3 +; V81M-NEXT: cset r0, lo +; V81M-NEXT: subs r0, r0, r6 +; V81M-NEXT: pop {r4, r5, r6, pc} %1 = call i8 @llvm.ucmp(i128 %x, i128 %y) ret i8 %1 } define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind { -; CHECK-LABEL: ucmp_32_32: -; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: it hi -; CHECK-NEXT: movhi r2, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_32_32: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: subs r2, r0, r1 +; THUMB1-NEXT: sbcs r2, r2 +; THUMB1-NEXT: cmp r1, r0 +; THUMB1-NEXT: sbcs r1, r1 +; THUMB1-NEXT: subs r0, r2, r1 +; THUMB1-NEXT: bx lr +; +; THUMB2-LABEL: ucmp_32_32: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs r0, r0, r1 +; THUMB2-NEXT: it hi +; THUMB2-NEXT: movhi r0, #1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r0, #-1 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_32_32: +; V81M: @ %bb.0: +; V81M-NEXT: cmp r0, r1 +; V81M-NEXT: cset r0, hi +; V81M-NEXT: it lo +; V81M-NEXT: movlo.w r0, #-1 +; V81M-NEXT: bx lr %1 = call i32 @llvm.ucmp(i32 %x, i32 %y) ret i32 %1 } define i32 @ucmp_32_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: ucmp_32_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, 
r12 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_32_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blo .LBB6_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB6_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blo .LBB6_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB6_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: ucmp_32_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_32_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lo +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lo +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: bx lr %1 = call i32 @llvm.ucmp(i64 %x, i64 %y) ret i32 %1 } define i64 @ucmp_64_64(i64 %x, i64 %y) nounwind { -; CHECK-LABEL: ucmp_64_64: -; CHECK: @ %bb.0: -; CHECK-NEXT: subs.w r12, r0, r2 -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: sbcs.w r12, r1, r3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r12, #1 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r9, #1 -; CHECK-NEXT: sub.w r0, r9, r12 -; CHECK-NEXT: asrs r1, r0, #31 -; CHECK-NEXT: bx lr +; THUMB1-LABEL: ucmp_64_64: +; THUMB1: @ %bb.0: +; THUMB1-NEXT: .save {r4, r5, r6, lr} +; THUMB1-NEXT: push {r4, r5, r6, lr} +; THUMB1-NEXT: movs r4, #1 +; THUMB1-NEXT: movs r5, #0 +; THUMB1-NEXT: subs r6, r0, r2 +; THUMB1-NEXT: mov r6, r1 +; THUMB1-NEXT: sbcs r6, r3 +; THUMB1-NEXT: mov r6, r4 +; THUMB1-NEXT: blo .LBB7_2 +; THUMB1-NEXT: @ %bb.1: +; THUMB1-NEXT: mov r6, r5 +; THUMB1-NEXT: .LBB7_2: +; THUMB1-NEXT: subs r0, r2, r0 +; THUMB1-NEXT: sbcs r3, r1 +; THUMB1-NEXT: blo .LBB7_4 +; THUMB1-NEXT: @ %bb.3: +; THUMB1-NEXT: mov r4, r5 +; THUMB1-NEXT: .LBB7_4: +; THUMB1-NEXT: subs r0, r4, r6 +; THUMB1-NEXT: asrs r1, r0, #31 +; THUMB1-NEXT: pop {r4, r5, r6, pc} +; +; THUMB2-LABEL: ucmp_64_64: +; THUMB2: @ %bb.0: +; THUMB2-NEXT: subs.w r12, r0, r2 +; THUMB2-NEXT: mov.w r9, #0 +; THUMB2-NEXT: sbcs.w r12, r1, r3 +; THUMB2-NEXT: mov.w r12, #0 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r12, #1 +; THUMB2-NEXT: subs r0, r2, r0 +; THUMB2-NEXT: sbcs.w r0, r3, r1 +; THUMB2-NEXT: it lo +; THUMB2-NEXT: movlo.w r9, #1 +; THUMB2-NEXT: sub.w r0, r9, r12 +; THUMB2-NEXT: asrs r1, r0, #31 +; THUMB2-NEXT: bx lr +; +; V81M-LABEL: ucmp_64_64: +; V81M: @ %bb.0: +; V81M-NEXT: subs.w r12, r0, r2 +; V81M-NEXT: sbcs.w r12, r1, r3 +; V81M-NEXT: cset r12, lo +; V81M-NEXT: subs r0, r2, r0 +; V81M-NEXT: sbcs.w r0, r3, r1 +; V81M-NEXT: cset r0, lo +; V81M-NEXT: sub.w r0, r0, r12 +; V81M-NEXT: asrs r1, r0, #31 +; V81M-NEXT: bx lr %1 = call i64 @llvm.ucmp(i64 %x, i64 %y) ret i64 %1 } diff --git a/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll b/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll index a27650f..7a90d28 100644 --- 
a/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll +++ b/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll @@ -37,52 +37,52 @@ define hidden void @copy(ptr noundef %va) { ; CHECK-NEXT: %va.addr = alloca ptr, align 4 ; CHECK-NEXT: %cp = alloca ptr, align 4 ; CHECK-NEXT: store ptr %va, ptr %va.addr, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %cp) ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp, ptr %va.addr, i32 4, i1 false) ; CHECK-NEXT: %0 = load ptr, ptr %cp, align 4 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %cp) ; CHECK-NEXT: ret void ; entry: %va.addr = alloca ptr, align 4 %cp = alloca ptr, align 4 store ptr %va, ptr %va.addr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp) + call void @llvm.lifetime.start.p0(ptr nonnull %cp) call void @llvm.va_copy.p0(ptr nonnull %cp, ptr nonnull %va.addr) %0 = load ptr, ptr %cp, align 4 call void @valist(ptr noundef %0) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp) + call void @llvm.lifetime.end.p0(ptr nonnull %cp) ret void } -declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.va_copy.p0(ptr, ptr) declare void @valist(ptr noundef) -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) define hidden void @start_once(...) { ; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %s = alloca ptr, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s) ; CHECK-NEXT: store ptr %varargs, ptr %s, align 4 ; CHECK-NEXT: %0 = load ptr, ptr %s, align 4 ; CHECK-NEXT: call void @valist(ptr noundef %0) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s) ; CHECK-NEXT: ret void ; entry: %s = alloca ptr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s) + call void @llvm.lifetime.start.p0(ptr nonnull %s) call void @llvm.va_start.p0(ptr nonnull %s) %0 = load ptr, ptr %s, align 4 call void @valist(ptr noundef %0) call void @llvm.va_end.p0(ptr %s) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s) + call void @llvm.lifetime.end.p0(ptr nonnull %s) ret void } @@ -95,23 +95,23 @@ define hidden void @start_twice(...) 
{ ; CHECK-NEXT: entry: ; CHECK-NEXT: %s0 = alloca ptr, align 4 ; CHECK-NEXT: %s1 = alloca ptr, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s0) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s1) ; CHECK-NEXT: store ptr %varargs, ptr %s0, align 4 ; CHECK-NEXT: %0 = load ptr, ptr %s0, align 4 ; CHECK-NEXT: call void @valist(ptr noundef %0) ; CHECK-NEXT: store ptr %varargs, ptr %s1, align 4 ; CHECK-NEXT: %1 = load ptr, ptr %s1, align 4 ; CHECK-NEXT: call void @valist(ptr noundef %1) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s1) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s0) ; CHECK-NEXT: ret void ; entry: %s0 = alloca ptr, align 4 %s1 = alloca ptr, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1) + call void @llvm.lifetime.start.p0(ptr nonnull %s0) + call void @llvm.lifetime.start.p0(ptr nonnull %s1) call void @llvm.va_start.p0(ptr nonnull %s0) %0 = load ptr, ptr %s0, align 4 call void @valist(ptr noundef %0) @@ -120,8 +120,8 @@ entry: %1 = load ptr, ptr %s1, align 4 call void @valist(ptr noundef %1) call void @llvm.va_end.p0(ptr %s1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0) + call void @llvm.lifetime.end.p0(ptr nonnull %s1) + call void @llvm.lifetime.end.p0(ptr nonnull %s0) ret void } @@ -129,11 +129,11 @@ define hidden void @single_i32(i32 noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -147,11 +147,11 @@ define hidden void @single_double(double noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr %0, align 8 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -163,11 +163,11 @@ define hidden void @single_v4f32(<4 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer) +; CHECK-NEXT: call void 
@llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr %0, align 16 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -179,11 +179,11 @@ define hidden void @single_v8f32(<8 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 32 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr %0, align 32 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -195,11 +195,11 @@ define hidden void @single_v16f32(<16 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 64 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr %0, align 64 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -211,11 +211,11 @@ define hidden void @single_v32f32(<32 x float> noundef %x) { ; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 128 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr %0, align 128 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -227,13 +227,13 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store double %y, ptr %1, align 8 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr 
%vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -245,13 +245,13 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %double_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store double %x, ptr %0, align 8 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -265,13 +265,13 @@ define hidden void @i32_libcS(i32 noundef %x, ptr noundef byval(%struct.libcS) a ; CHECK-NEXT: %IndirectAlloca = alloca %struct.libcS, align 8 ; CHECK-NEXT: %vararg_buffer = alloca %i32_libcS.vararg, align 16 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %y, i64 24, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store ptr %IndirectAlloca, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -285,13 +285,13 @@ define hidden void @libcS_i32(ptr noundef byval(%struct.libcS) align 8 %x, i32 n ; CHECK-NEXT: %IndirectAlloca = alloca %struct.libcS, align 8 ; CHECK-NEXT: %vararg_buffer = alloca %libcS_i32.vararg, align 16 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %x, i64 24, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store ptr %IndirectAlloca, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -303,13 +303,13 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, 
ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store <4 x float> %y, ptr %1, align 16 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -321,13 +321,13 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 16 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <4 x float> %x, ptr %0, align 16 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -339,13 +339,13 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 32 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store <8 x float> %y, ptr %1, align 32 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -357,13 +357,13 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 32 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 36, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <8 x float> %x, ptr %0, align 32 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 36, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -375,13 +375,13 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 64 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr %vararg_buffer) +; CHECK-NEXT: call 
void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store <16 x float> %y, ptr %1, align 64 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -393,13 +393,13 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 64 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 68, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <16 x float> %x, ptr %0, align 64 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 68, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -411,13 +411,13 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 128 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %0, align 4 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 2 ; CHECK-NEXT: store <32 x float> %y, ptr %1, align 128 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -429,13 +429,13 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 128 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 132, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store <32 x float> %x, ptr %0, align 128 ; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1 ; CHECK-NEXT: store i32 %y, ptr %1, align 4 ; CHECK-NEXT: call void @vararg(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 132, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -448,11 +448,11 @@ define hidden void @fptr_single_i32(i32 noundef %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, 
align 16 ; CHECK-NEXT: %0 = load volatile ptr, ptr @vararg_ptr, align 4 -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store i32 %x, ptr %1, align 4 ; CHECK-NEXT: call void %0(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: @@ -468,11 +468,11 @@ define hidden void @fptr_libcS(ptr noundef byval(%struct.libcS) align 8 %x) { ; CHECK-NEXT: %vararg_buffer = alloca %fptr_libcS.vararg, align 16 ; CHECK-NEXT: %0 = load volatile ptr, ptr @vararg_ptr, align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %x, i64 24, i1 false) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer) ; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr %vararg_buffer, i32 0, i32 0 ; CHECK-NEXT: store ptr %IndirectAlloca, ptr %1, align 4 ; CHECK-NEXT: call void %0(ptr %vararg_buffer) -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll index 0f968de..3264fe9 100644 --- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll +++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll @@ -18,7 +18,7 @@ define void @test_static() { ; CHECK-NEXT: i32 1, label %[[ENTRY_SPLIT_SPLIT:.*]] ; CHECK-NEXT: ] ; CHECK: [[ENTRY_SPLIT]]: -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]]) ; CHECK-NEXT: call void @__wasm_setjmp(ptr @buf, i32 1, ptr [[FUNCTIONINVOCATIONID]]) ; CHECK-NEXT: br label %[[ENTRY_SPLIT_SPLIT]] ; CHECK: [[ENTRY_SPLIT_SPLIT]]: @@ -31,7 +31,7 @@ define void @test_static() { ; CHECK: [[_NOEXC:.*:]] ; CHECK-NEXT: ret void ; CHECK: [[ELSE]]: -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[X]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]]) ; CHECK-NEXT: ret void ; CHECK: [[CATCH_DISPATCH_LONGJMP]]: ; CHECK-NEXT: [[TMP0:%.*]] = catchswitch within none [label %catch.longjmp] unwind to caller @@ -53,7 +53,7 @@ define void @test_static() { ; entry: %x = alloca i32, align 4 - call void @llvm.lifetime.start.p0(i64 4, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) %call = call i32 @setjmp(ptr @buf) returns_twice %cmp = icmp eq i32 %call, 0 br i1 %cmp, label %if, label %else @@ -63,7 +63,7 @@ if: ret void else: - call void @llvm.lifetime.end.p0(i64 4, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ret void } @@ -114,7 +114,7 @@ define void @test_dynamic(i32 %size) { ; entry: %x = alloca i32, i32 %size, align 4 - call void @llvm.lifetime.start.p0(i64 -1, ptr %x) + call void @llvm.lifetime.start.p0(ptr %x) %call = call i32 @setjmp(ptr @buf) returns_twice %cmp = icmp eq i32 %call, 0 br i1 %cmp, label %if, label %else @@ -124,6 +124,6 @@ if: ret void else: - call void @llvm.lifetime.end.p0(i64 -1, ptr %x) + call void @llvm.lifetime.end.p0(ptr %x) ret void } diff --git a/llvm/test/CodeGen/WebAssembly/ref-test-func.ll b/llvm/test/CodeGen/WebAssembly/ref-test-func.ll index ea2453f..4fda253 100644 --- 
a/llvm/test/CodeGen/WebAssembly/ref-test-func.ll +++ b/llvm/test/CodeGen/WebAssembly/ref-test-func.ll @@ -31,7 +31,7 @@ define void @test_fpsig_return_i32(ptr noundef %func) local_unnamed_addr #0 { ; CHECK-NEXT: call use ; CHECK-NEXT: # fallthrough-return entry: - %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 0) + %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 poison) tail call void @use(i32 noundef %res) #3 ret void } @@ -48,7 +48,7 @@ define void @test_fpsig_return_i64(ptr noundef %func) local_unnamed_addr #0 { ; CHECK-NEXT: call use ; CHECK-NEXT: # fallthrough-return entry: - %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i64 0) + %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i64 poison) tail call void @use(i32 noundef %res) #3 ret void } @@ -65,7 +65,7 @@ define void @test_fpsig_return_f32(ptr noundef %func) local_unnamed_addr #0 { ; CHECK-NEXT: call use ; CHECK-NEXT: # fallthrough-return entry: - %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float 0.) + %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float poison) tail call void @use(i32 noundef %res) #3 ret void } @@ -82,7 +82,7 @@ define void @test_fpsig_return_f64(ptr noundef %func) local_unnamed_addr #0 { ; CHECK-NEXT: call use ; CHECK-NEXT: # fallthrough-return entry: - %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double 0.) + %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double poison) tail call void @use(i32 noundef %res) #3 ret void } @@ -100,7 +100,7 @@ define void @test_fpsig_param_i32(ptr noundef %func) local_unnamed_addr #0 { ; CHECK-NEXT: call use ; CHECK-NEXT: # fallthrough-return entry: - %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, double 0.) + %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, double poison) tail call void @use(i32 noundef %res) #3 ret void } @@ -118,7 +118,7 @@ define void @test_fpsig_multiple_params_and_returns(ptr noundef %func) local_unn ; CHECK-NEXT: call use ; CHECK-NEXT: # fallthrough-return entry: - %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 0, i64 0, float 0., double 0., token poison, i64 0, float 0., i64 0) + %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 poison, i64 poison, float poison, double poison, token poison, i64 poison, float poison, i64 poison) tail call void @use(i32 noundef %res) #3 ret void } @@ -137,10 +137,26 @@ define void @test_fpsig_ptrs(ptr noundef %func) local_unnamed_addr #0 { ; CHECK-NEXT: call use ; CHECK-NEXT: # fallthrough-return entry: - %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr null, token poison, ptr null, ptr null) + %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr poison, token poison, ptr poison, ptr poison) tail call void @use(i32 noundef %res) #3 ret void } +define void @test_reference_types(ptr noundef %func) local_unnamed_addr #0 { +; CHECK-LABEL: test_reference_types: +; CHK32: .functype test_reference_types (i32) -> () +; CHK64: .functype test_reference_types (i64) -> () +; CHECK-NEXT: # %bb.0: # %entry +; CHECK-NEXT: local.get 0 +; CHK64-NEXT: i32.wrap_i64 +; CHECK-NEXT: table.get __indirect_function_table +; CHECK-NEXT: ref.test (funcref, externref) -> (externref) +; CHECK-NEXT: call use +; CHECK-NEXT: # fallthrough-return +entry: + %res = tail call i32 (ptr, ...) 
@llvm.wasm.ref.test.func(ptr %func, ptr addrspace(10) poison, token poison, ptr addrspace(20) poison, ptr addrspace(10) poison) + tail call void @use(i32 noundef %res) #3 + ret void +} declare void @use(i32 noundef) local_unnamed_addr #1 diff --git a/llvm/test/CodeGen/WebAssembly/returned.ll b/llvm/test/CodeGen/WebAssembly/returned.ll index aef75d8..bad9d60 100644 --- a/llvm/test/CodeGen/WebAssembly/returned.ll +++ b/llvm/test/CodeGen/WebAssembly/returned.ll @@ -99,8 +99,8 @@ define void @test() { ; CHECK-NEXT: return entry: %a = alloca i32 - call void @llvm.lifetime.start.p0(i64 4, ptr %a) + call void @llvm.lifetime.start.p0(ptr %a) %ret = call ptr @returns_arg(ptr %a) - call void @llvm.lifetime.end.p0(i64 4, ptr %a) + call void @llvm.lifetime.end.p0(ptr %a) ret void } diff --git a/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll b/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll new file mode 100644 index 0000000..f65d99d --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/ptrtoaddr.ll @@ -0,0 +1,109 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=CHECK + +define i1 @ptrtoaddr_1(ptr %p) { +; CHECK-LABEL: ptrtoaddr_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: xorb $1, %al +; CHECK-NEXT: # kill: def $al killed $al killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i1 + %ret = xor i1 %trunc, 1 + ret i1 %ret +} + +define i8 @ptrtoaddr_8(ptr %p) { +; CHECK-LABEL: ptrtoaddr_8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notb %al +; CHECK-NEXT: # kill: def $al killed $al killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i8 + %ret = xor i8 %trunc, -1 + ret i8 %ret +} + +define i16 @ptrtoaddr_16(ptr %p) { +; CHECK-LABEL: ptrtoaddr_16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notw %ax +; CHECK-NEXT: # kill: def $ax killed $ax killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i16 + %ret = xor i16 %trunc, -1 + ret i16 %ret +} + +define i32 @ptrtoaddr_32(ptr %p) { +; CHECK-LABEL: ptrtoaddr_32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notl %eax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i32 + %ret = xor i32 %trunc, -1 + ret i32 %ret +} + +define i64 @ptrtoaddr_64(ptr %p) { +; CHECK-LABEL: ptrtoaddr_64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notq %rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %ret = xor i64 %addr, -1 + ret i64 %ret +} + +define i128 @ptrtoaddr_128(ptr %p) { +; CHECK-LABEL: ptrtoaddr_128: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: xorl %edx, %edx +; CHECK-NEXT: notq %rax +; CHECK-NEXT: notq %rdx +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %ext = zext i64 %addr to i128 + %ret = xor i128 %ext, -1 + ret i128 %ret +} + +; TODO: Vector version cannot be handled by GlobalIsel yet (same error as ptrtoint: https://github.com/llvm/llvm-project/issues/150875). 
+; define <2 x i64> @ptrtoaddr_vec(<2 x ptr> %p) { +; entry: +; %addr = ptrtoaddr <2 x ptr> %p to <2 x i64> +; %ret = xor <2 x i64> %addr, <i64 -1, i64 -1> +; ret <2 x i64> %ret +;} + +; UTC_ARGS: --disable + +@foo = global [16 x i8] zeroinitializer +@addr = global i64 ptrtoaddr (ptr @foo to i64) +; CHECK: addr: +; CHECK-NEXT: .quad foo +; CHECK-NEXT: .size addr, 8 +@addr_plus_one = global i64 ptrtoaddr (ptr getelementptr (i8, ptr @foo, i64 1) to i64) +; CHECK: addr_plus_one: +; CHECK-NEXT: .quad foo+1 +; CHECK-NEXT: .size addr_plus_one, 8 +@const_addr = global i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 1) to i64) +; CHECK: const_addr: +; CHECK-NEXT: .quad 0+1 +; CHECK-NEXT: .size const_addr, 8 diff --git a/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll b/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll index 2ca99bd..58dfd63 100644 --- a/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll +++ b/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll @@ -51,20 +51,20 @@ entry: %sincos = tail call { float, float } @llvm.sincos.f32(float %in) %sin = extractvalue { float, float } %sincos, 0 %cos = extractvalue { float, float } %sincos, 1 - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed) + call void @llvm.lifetime.start.p0(ptr nonnull %computed) store float %cos, ptr %computed, align 4 call void @use_ptr(ptr nonnull %computed) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed1) + call void @llvm.lifetime.end.p0(ptr nonnull %computed) + call void @llvm.lifetime.start.p0(ptr nonnull %computed1) %fneg_sin = fneg float %sin store float %fneg_sin, ptr %computed1, align 4 call void @use_ptr(ptr nonnull %computed1) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed1) - call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed3) + call void @llvm.lifetime.end.p0(ptr nonnull %computed1) + call void @llvm.lifetime.start.p0(ptr nonnull %computed3) %fneg_cos = fneg float %cos store float %fneg_cos, ptr %computed3, align 4 call void @use_ptr(ptr nonnull %computed3) - call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed3) + call void @llvm.lifetime.end.p0(ptr nonnull %computed3) ret i32 0 } diff --git a/llvm/test/CodeGen/X86/pr152630.ll b/llvm/test/CodeGen/X86/pr152630.ll new file mode 100644 index 0000000..8fa9883 --- /dev/null +++ b/llvm/test/CodeGen/X86/pr152630.ll @@ -0,0 +1,34 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s + +define i32 @pr152630(i1 %cond) nounwind { +; CHECK-LABEL: pr152630: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: andl $1, %edi +; CHECK-NEXT: decl %edi +; CHECK-NEXT: cmpl $-1, %edi +; CHECK-NEXT: je .LBB0_2 +; CHECK-NEXT: # %bb.1: # %entry +; CHECK-NEXT: movzbl %dil, %eax +; CHECK-NEXT: testl %eax, %eax +; CHECK-NEXT: jne .LBB0_3 +; CHECK-NEXT: .LBB0_2: # %if.then +; CHECK-NEXT: xorl %eax, %eax +; CHECK-NEXT: retq +; CHECK-NEXT: .LBB0_3: # %if.else +; CHECK-NEXT: movl $1, %eax +; CHECK-NEXT: retq +entry: + %sel = select i1 %cond, i32 0, i32 -1 + %conv = trunc nsw i32 %sel to i8 + switch i8 %conv, label %if.else [ + i8 -1, label %if.then + i8 0, label %if.then + ] + +if.then: + ret i32 0 + +if.else: + ret i32 1 +} diff --git a/llvm/test/CodeGen/X86/ptrtoaddr.ll b/llvm/test/CodeGen/X86/ptrtoaddr.ll new file mode 100644 index 0000000..24bf9db --- /dev/null +++ b/llvm/test/CodeGen/X86/ptrtoaddr.ll @@ -0,0 +1,113 @@ +; NOTE: Assertions 
have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=CHECK + +define i1 @ptrtoaddr_1(ptr %p) { +; CHECK-LABEL: ptrtoaddr_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: xorb $1, %al +; CHECK-NEXT: # kill: def $al killed $al killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i1 + %ret = xor i1 %trunc, 1 + ret i1 %ret +} + +define i8 @ptrtoaddr_8(ptr %p) { +; CHECK-LABEL: ptrtoaddr_8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notb %al +; CHECK-NEXT: # kill: def $al killed $al killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i8 + %ret = xor i8 %trunc, -1 + ret i8 %ret +} + +define i16 @ptrtoaddr_16(ptr %p) { +; CHECK-LABEL: ptrtoaddr_16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notl %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i16 + %ret = xor i16 %trunc, -1 + ret i16 %ret +} + +define i32 @ptrtoaddr_32(ptr %p) { +; CHECK-LABEL: ptrtoaddr_32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notl %eax +; CHECK-NEXT: # kill: def $eax killed $eax killed $rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %trunc = trunc i64 %addr to i32 + %ret = xor i32 %trunc, -1 + ret i32 %ret +} + +define i64 @ptrtoaddr_64(ptr %p) { +; CHECK-LABEL: ptrtoaddr_64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notq %rax +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %ret = xor i64 %addr, -1 + ret i64 %ret +} + +define i128 @ptrtoaddr_128(ptr %p) { +; CHECK-LABEL: ptrtoaddr_128: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: notq %rax +; CHECK-NEXT: movq $-1, %rdx +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr ptr %p to i64 + %ext = zext i64 %addr to i128 + %ret = xor i128 %ext, -1 + ret i128 %ret +} + + +define <2 x i64> @ptrtoaddr_vec(<2 x ptr> %p) { +; CHECK-LABEL: ptrtoaddr_vec: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pcmpeqd %xmm1, %xmm1 +; CHECK-NEXT: pxor %xmm1, %xmm0 +; CHECK-NEXT: retq +entry: + %addr = ptrtoaddr <2 x ptr> %p to <2 x i64> + %ret = xor <2 x i64> %addr, <i64 -1, i64 -1> + ret <2 x i64> %ret +} + +; UTC_ARGS: --disable + +@foo = global [16 x i8] zeroinitializer +@addr = global i64 ptrtoaddr (ptr @foo to i64) +; CHECK: addr: +; CHECK-NEXT: .quad foo +; CHECK-NEXT: .size addr, 8 +@addr_plus_one = global i64 ptrtoaddr (ptr getelementptr (i8, ptr @foo, i64 1) to i64) +; CHECK: addr_plus_one: +; CHECK-NEXT: .quad foo+1 +; CHECK-NEXT: .size addr_plus_one, 8 +@const_addr = global i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 1) to i64) +; CHECK: const_addr: +; CHECK-NEXT: .quad 0+1 +; CHECK-NEXT: .size const_addr, 8 diff --git a/llvm/test/CodeGen/X86/select-optimize.ll b/llvm/test/CodeGen/X86/select-optimize.ll index c7cf9cb..6cb49f2 100644 --- a/llvm/test/CodeGen/X86/select-optimize.ll +++ b/llvm/test/CodeGen/X86/select-optimize.ll @@ -233,7 +233,7 @@ define i32 @expensive_val_operand5(i32 %b, i32 %y, i1 %cmp) { ; CHECK-LABEL: @expensive_val_operand5( ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4 ; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 8 -; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[A]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]]) ; 
CHECK-NEXT: [[CMP_FROZEN:%.*]] = freeze i1 [[CMP:%.*]] ; CHECK-NEXT: br i1 [[CMP_FROZEN]], label [[SELECT_TRUE_SINK:%.*]], label [[SELECT_END:%.*]], !prof [[PROF18]] ; CHECK: select.true.sink: @@ -245,7 +245,7 @@ define i32 @expensive_val_operand5(i32 %b, i32 %y, i1 %cmp) { ; %a = alloca i32 %load = load i32, ptr %a, align 8 - call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %a) + call void @llvm.lifetime.end.p0(ptr nonnull %a) %x = add i32 %load, %b %sel = select i1 %cmp, i32 %x, i32 %y, !prof !17 ret i32 %sel @@ -520,7 +520,7 @@ for.body: ; preds = %for.body.preheader, declare void @llvm.dbg.value(metadata, metadata, metadata) ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn -declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) +declare void @llvm.lifetime.end.p0(ptr nocapture) declare void @free(ptr nocapture) diff --git a/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll b/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll index 5c5f704..6b07891 100644 --- a/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll +++ b/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll @@ -62,10 +62,11 @@ entry: define i32 @simplify_demanded_bits_drop_flag(i1 zeroext %x, i1 zeroext %y) nounwind { ; CHECK-LABEL: simplify_demanded_bits_drop_flag: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: negl %edi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi ; CHECK-NEXT: shll $2, %esi -; CHECK-NEXT: xorl %edi, %esi -; CHECK-NEXT: movslq %esi, %rax +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: negq %rax +; CHECK-NEXT: xorq %rsi, %rax ; CHECK-NEXT: imulq $-1634202141, %rax, %rax # imm = 0x9E980DE3 ; CHECK-NEXT: movq %rax, %rcx ; CHECK-NEXT: shrq $63, %rcx diff --git a/llvm/test/CodeGen/Xtensa/atomic-load-store.ll b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll new file mode 100644 index 0000000..bd843a3 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll @@ -0,0 +1,498 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @atomic_load_i8_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a unordered, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a monotonic, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: 
callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a acquire, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a seq_cst, align 1 + ret i8 %1 +} + +define i16 @atomic_load_i16_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a unordered, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a monotonic, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a acquire, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a seq_cst, align 2 + ret i16 %1 +} + +define i32 @atomic_load_i32_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or 
a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a unordered, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a monotonic, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a acquire, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a seq_cst, align 4 + ret i32 %1 +} + +define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a unordered, align 1 + ret void +} + +define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a monotonic, align 1 + ret void +} + +define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; 
XTENSA-ATOMIC-LABEL: atomic_store_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a release, align 1 + ret void +} + +define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a seq_cst, align 1 + ret void +} + +define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a unordered, align 2 + ret void +} + +define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a monotonic, align 2 + ret void +} + +define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a release, align 2 + ret void +} + +define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a seq_cst, align 2 + ret void +} + +define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; 
XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a unordered, align 4 + ret void +} + +define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a monotonic, align 4 + ret void +} + +define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a release, align 4 + ret void +} + +define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a seq_cst, align 4 + ret void +} diff --git a/llvm/test/CodeGen/Xtensa/atomic-rmw.ll b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll new file mode 100644 index 0000000..81cb2dd --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll @@ -0,0 +1,10298 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; 
XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB0_2 +; XTENSA-ATOMIC-NEXT: .LBB0_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB0_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB0_4 +; XTENSA-ATOMIC-NEXT: .LBB0_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB0_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB0_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB0_1 +; XTENSA-ATOMIC-NEXT: .LBB0_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB1_2 +; XTENSA-ATOMIC-NEXT: .LBB1_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB1_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB1_4 +; XTENSA-ATOMIC-NEXT: .LBB1_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB1_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB1_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB1_1 +; XTENSA-ATOMIC-NEXT: .LBB1_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; 
XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB2_2 +; XTENSA-ATOMIC-NEXT: .LBB2_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB2_4 +; XTENSA-ATOMIC-NEXT: .LBB2_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB2_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB2_1 +; XTENSA-ATOMIC-NEXT: .LBB2_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB3_2 +; XTENSA-ATOMIC-NEXT: .LBB3_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB3_4 +; XTENSA-ATOMIC-NEXT: .LBB3_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB3_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB3_1 
+; XTENSA-ATOMIC-NEXT: .LBB3_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB4_2 +; XTENSA-ATOMIC-NEXT: .LBB4_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB4_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB4_4 +; XTENSA-ATOMIC-NEXT: .LBB4_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB4_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB4_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB4_1 +; XTENSA-ATOMIC-NEXT: .LBB4_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB5_2 +; XTENSA-ATOMIC-NEXT: .LBB5_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB5_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB5_4 +; XTENSA-ATOMIC-NEXT: .LBB5_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB5_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB5_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB5_1 +; XTENSA-ATOMIC-NEXT: .LBB5_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB6_2 +; XTENSA-ATOMIC-NEXT: .LBB6_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB6_4 +; XTENSA-ATOMIC-NEXT: .LBB6_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB6_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB6_1 +; XTENSA-ATOMIC-NEXT: .LBB6_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli 
a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB7_2 +; XTENSA-ATOMIC-NEXT: .LBB7_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB7_4 +; XTENSA-ATOMIC-NEXT: .LBB7_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB7_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB7_1 +; XTENSA-ATOMIC-NEXT: .LBB7_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB8_2 +; XTENSA-ATOMIC-NEXT: .LBB8_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB8_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB8_4 +; XTENSA-ATOMIC-NEXT: .LBB8_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB8_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB8_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB8_1 +; XTENSA-ATOMIC-NEXT: .LBB8_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: 
srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB9_2 +; XTENSA-ATOMIC-NEXT: .LBB9_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB9_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB9_4 +; XTENSA-ATOMIC-NEXT: .LBB9_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB9_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB9_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB9_1 +; XTENSA-ATOMIC-NEXT: .LBB9_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB10_2 +; XTENSA-ATOMIC-NEXT: .LBB10_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB10_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, 
.LBB10_4 +; XTENSA-ATOMIC-NEXT: .LBB10_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB10_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB10_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB10_1 +; XTENSA-ATOMIC-NEXT: .LBB10_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB11_2 +; XTENSA-ATOMIC-NEXT: .LBB11_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB11_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB11_4 +; XTENSA-ATOMIC-NEXT: .LBB11_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB11_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB11_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB11_1 +; XTENSA-ATOMIC-NEXT: .LBB11_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; 
XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB12_2 +; XTENSA-ATOMIC-NEXT: .LBB12_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB12_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB12_4 +; XTENSA-ATOMIC-NEXT: .LBB12_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB12_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB12_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB12_1 +; XTENSA-ATOMIC-NEXT: .LBB12_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB13_2 +; XTENSA-ATOMIC-NEXT: .LBB13_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB13_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB13_4 +; XTENSA-ATOMIC-NEXT: .LBB13_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB13_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB13_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB13_1 +; XTENSA-ATOMIC-NEXT: .LBB13_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; 
XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB14_2 +; XTENSA-ATOMIC-NEXT: .LBB14_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB14_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB14_4 +; XTENSA-ATOMIC-NEXT: .LBB14_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB14_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB14_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB14_1 +; XTENSA-ATOMIC-NEXT: .LBB14_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB15_2 +; XTENSA-ATOMIC-NEXT: .LBB15_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB15_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB15_4 +; XTENSA-ATOMIC-NEXT: .LBB15_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB15_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB15_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB15_1 +; XTENSA-ATOMIC-NEXT: .LBB15_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB16_2 +; XTENSA-ATOMIC-NEXT: .LBB16_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB16_4 +; XTENSA-ATOMIC-NEXT: .LBB16_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB16_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB16_1 +; XTENSA-ATOMIC-NEXT: .LBB16_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; 
XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB17_2 +; XTENSA-ATOMIC-NEXT: .LBB17_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB17_4 +; XTENSA-ATOMIC-NEXT: .LBB17_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB17_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB17_1 +; XTENSA-ATOMIC-NEXT: .LBB17_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB18_2 +; XTENSA-ATOMIC-NEXT: .LBB18_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB18_4 +; XTENSA-ATOMIC-NEXT: .LBB18_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB18_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB18_1 +; XTENSA-ATOMIC-NEXT: .LBB18_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_seq_cst: +; 
XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB19_2 +; XTENSA-ATOMIC-NEXT: .LBB19_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB19_4 +; XTENSA-ATOMIC-NEXT: .LBB19_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB19_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB19_1 +; XTENSA-ATOMIC-NEXT: .LBB19_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB20_2 +; XTENSA-ATOMIC-NEXT: .LBB20_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB20_4 +; XTENSA-ATOMIC-NEXT: .LBB20_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or 
a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB20_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB20_1 +; XTENSA-ATOMIC-NEXT: .LBB20_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB21_2 +; XTENSA-ATOMIC-NEXT: .LBB21_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB21_4 +; XTENSA-ATOMIC-NEXT: .LBB21_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB21_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB21_1 +; XTENSA-ATOMIC-NEXT: .LBB21_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi 
a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB22_2 +; XTENSA-ATOMIC-NEXT: .LBB22_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB22_4 +; XTENSA-ATOMIC-NEXT: .LBB22_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB22_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB22_1 +; XTENSA-ATOMIC-NEXT: .LBB22_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB23_2 +; XTENSA-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB23_4 +; XTENSA-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB23_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB23_1 +; XTENSA-ATOMIC-NEXT: .LBB23_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 
@atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI24_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB24_2 +; XTENSA-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB24_4 +; XTENSA-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB24_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB24_1 +; XTENSA-ATOMIC-NEXT: .LBB24_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI25_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB25_2 +; XTENSA-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB25_4 +; XTENSA-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, 
a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB25_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB25_1 +; XTENSA-ATOMIC-NEXT: .LBB25_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI26_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB26_2 +; XTENSA-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB26_4 +; XTENSA-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB26_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB26_1 +; XTENSA-ATOMIC-NEXT: .LBB26_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI27_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB27_2 +; XTENSA-ATOMIC-NEXT: .LBB27_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB27_4 +; XTENSA-ATOMIC-NEXT: .LBB27_2: # 
%atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB27_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB27_1 +; XTENSA-ATOMIC-NEXT: .LBB27_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI28_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB28_2 +; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB28_4 +; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB28_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB28_1 +; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI29_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB29_2 +; 
XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB29_4 +; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB29_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB29_1 +; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI30_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB30_2 +; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB30_4 +; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB30_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB30_1 +; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI31_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; 
XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB31_2 +; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB31_4 +; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB31_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB31_1 +; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI32_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB32_2 +; XTENSA-ATOMIC-NEXT: .LBB32_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB32_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB32_4 +; XTENSA-ATOMIC-NEXT: .LBB32_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB32_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB32_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB32_1 +; XTENSA-ATOMIC-NEXT: .LBB32_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI33_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; 
XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB33_2 +; XTENSA-ATOMIC-NEXT: .LBB33_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB33_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB33_4 +; XTENSA-ATOMIC-NEXT: .LBB33_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB33_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB33_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB33_1 +; XTENSA-ATOMIC-NEXT: .LBB33_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI34_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB34_2 +; XTENSA-ATOMIC-NEXT: .LBB34_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB34_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB34_4 +; XTENSA-ATOMIC-NEXT: .LBB34_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB34_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB34_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB34_1 +; XTENSA-ATOMIC-NEXT: .LBB34_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; 
XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI35_0 +; XTENSA-NEXT: j .LBB35_2 +; XTENSA-NEXT: .LBB35_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB35_4 +; XTENSA-NEXT: .LBB35_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB35_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB35_1 +; XTENSA-NEXT: .LBB35_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB35_2 +; XTENSA-ATOMIC-NEXT: .LBB35_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB35_6 +; XTENSA-ATOMIC-NEXT: .LBB35_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB35_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB35_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB35_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB35_1 +; XTENSA-ATOMIC-NEXT: .LBB35_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI36_0 +; XTENSA-NEXT: j .LBB36_2 +; XTENSA-NEXT: .LBB36_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB36_2 Depth=1 
+; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB36_4 +; XTENSA-NEXT: .LBB36_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB36_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB36_1 +; XTENSA-NEXT: .LBB36_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB36_2 +; XTENSA-ATOMIC-NEXT: .LBB36_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB36_6 +; XTENSA-ATOMIC-NEXT: .LBB36_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB36_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB36_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB36_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB36_1 +; XTENSA-ATOMIC-NEXT: .LBB36_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI37_0 +; XTENSA-NEXT: j .LBB37_2 +; XTENSA-NEXT: .LBB37_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; 
XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB37_4 +; XTENSA-NEXT: .LBB37_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB37_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB37_1 +; XTENSA-NEXT: .LBB37_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB37_2 +; XTENSA-ATOMIC-NEXT: .LBB37_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB37_6 +; XTENSA-ATOMIC-NEXT: .LBB37_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB37_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB37_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB37_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB37_1 +; XTENSA-ATOMIC-NEXT: .LBB37_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI38_0 +; XTENSA-NEXT: j .LBB38_2 +; XTENSA-NEXT: .LBB38_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, 
a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB38_4 +; XTENSA-NEXT: .LBB38_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB38_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB38_1 +; XTENSA-NEXT: .LBB38_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB38_2 +; XTENSA-ATOMIC-NEXT: .LBB38_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB38_6 +; XTENSA-ATOMIC-NEXT: .LBB38_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB38_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB38_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB38_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB38_1 +; XTENSA-ATOMIC-NEXT: .LBB38_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI39_0 +; XTENSA-NEXT: j .LBB39_2 +; XTENSA-NEXT: .LBB39_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; 
XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB39_4 +; XTENSA-NEXT: .LBB39_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB39_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB39_1 +; XTENSA-NEXT: .LBB39_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB39_2 +; XTENSA-ATOMIC-NEXT: .LBB39_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB39_6 +; XTENSA-ATOMIC-NEXT: .LBB39_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB39_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB39_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB39_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB39_1 +; XTENSA-ATOMIC-NEXT: .LBB39_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI40_0 +; XTENSA-NEXT: j .LBB40_2 +; XTENSA-NEXT: .LBB40_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB40_4 +; XTENSA-NEXT: .LBB40_2: # %atomicrmw.start +; XTENSA-NEXT: 
# =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB40_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB40_1 +; XTENSA-NEXT: .LBB40_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB40_2 +; XTENSA-ATOMIC-NEXT: .LBB40_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB40_6 +; XTENSA-ATOMIC-NEXT: .LBB40_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB40_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB40_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB40_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB40_1 +; XTENSA-ATOMIC-NEXT: .LBB40_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI41_0 +; XTENSA-NEXT: j .LBB41_2 +; XTENSA-NEXT: .LBB41_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB41_4 +; XTENSA-NEXT: .LBB41_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt 
a5, a8, .LBB41_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB41_1 +; XTENSA-NEXT: .LBB41_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB41_2 +; XTENSA-ATOMIC-NEXT: .LBB41_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB41_6 +; XTENSA-ATOMIC-NEXT: .LBB41_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB41_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB41_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB41_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB41_1 +; XTENSA-ATOMIC-NEXT: .LBB41_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI42_0 +; XTENSA-NEXT: j .LBB42_2 +; XTENSA-NEXT: .LBB42_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB42_4 +; XTENSA-NEXT: .LBB42_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB42_1 +; 
XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB42_1 +; XTENSA-NEXT: .LBB42_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB42_2 +; XTENSA-ATOMIC-NEXT: .LBB42_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB42_6 +; XTENSA-ATOMIC-NEXT: .LBB42_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB42_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB42_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB42_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB42_1 +; XTENSA-ATOMIC-NEXT: .LBB42_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI43_0 +; XTENSA-NEXT: j .LBB43_2 +; XTENSA-NEXT: .LBB43_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB43_4 +; XTENSA-NEXT: .LBB43_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB43_1 +; XTENSA-NEXT: # %bb.3: 
# %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB43_1 +; XTENSA-NEXT: .LBB43_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB43_2 +; XTENSA-ATOMIC-NEXT: .LBB43_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB43_6 +; XTENSA-ATOMIC-NEXT: .LBB43_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB43_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB43_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB43_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB43_1 +; XTENSA-ATOMIC-NEXT: .LBB43_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI44_0 +; XTENSA-NEXT: j .LBB44_2 +; XTENSA-NEXT: .LBB44_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB44_4 +; XTENSA-NEXT: .LBB44_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB44_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB44_1 +; 
XTENSA-NEXT: .LBB44_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB44_2 +; XTENSA-ATOMIC-NEXT: .LBB44_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB44_6 +; XTENSA-ATOMIC-NEXT: .LBB44_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB44_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB44_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB44_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB44_1 +; XTENSA-ATOMIC-NEXT: .LBB44_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a6, .LCPI45_0 +; XTENSA-NEXT: j .LBB45_2 +; XTENSA-NEXT: .LBB45_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB45_4 +; XTENSA-NEXT: .LBB45_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB45_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB45_1 +; XTENSA-NEXT: .LBB45_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; 
XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB45_2 +; XTENSA-ATOMIC-NEXT: .LBB45_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB45_6 +; XTENSA-ATOMIC-NEXT: .LBB45_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB45_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB45_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB45_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB45_1 +; XTENSA-ATOMIC-NEXT: .LBB45_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a6, .LCPI46_0 +; XTENSA-NEXT: j .LBB46_2 +; XTENSA-NEXT: .LBB46_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB46_4 +; XTENSA-NEXT: .LBB46_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB46_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB46_1 +; XTENSA-NEXT: .LBB46_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, 
a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB46_2 +; XTENSA-ATOMIC-NEXT: .LBB46_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB46_6 +; XTENSA-ATOMIC-NEXT: .LBB46_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB46_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB46_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB46_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB46_1 +; XTENSA-ATOMIC-NEXT: .LBB46_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a3, .LCPI47_0 +; XTENSA-NEXT: j .LBB47_2 +; XTENSA-NEXT: .LBB47_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB47_4 +; XTENSA-NEXT: .LBB47_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bgeu a9, a8, .LBB47_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB47_1 +; XTENSA-NEXT: .LBB47_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; 
XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB47_2 +; XTENSA-ATOMIC-NEXT: .LBB47_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB47_6 +; XTENSA-ATOMIC-NEXT: .LBB47_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB47_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB47_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB47_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB47_1 +; XTENSA-ATOMIC-NEXT: .LBB47_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a3, .LCPI48_0 +; XTENSA-NEXT: j .LBB48_2 +; XTENSA-NEXT: .LBB48_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB48_4 +; XTENSA-NEXT: .LBB48_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bgeu a9, a8, .LBB48_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB48_1 +; XTENSA-NEXT: .LBB48_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, 
a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB48_2 +; XTENSA-ATOMIC-NEXT: .LBB48_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB48_6 +; XTENSA-ATOMIC-NEXT: .LBB48_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB48_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB48_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB48_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB48_1 +; XTENSA-ATOMIC-NEXT: .LBB48_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a6, .LCPI49_0 +; XTENSA-NEXT: j .LBB49_2 +; XTENSA-NEXT: .LBB49_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB49_4 +; XTENSA-NEXT: .LBB49_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB49_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB49_1 +; XTENSA-NEXT: .LBB49_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB49_2 +; XTENSA-ATOMIC-NEXT: .LBB49_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB49_6 +; XTENSA-ATOMIC-NEXT: .LBB49_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB49_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB49_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB49_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB49_1 +; XTENSA-ATOMIC-NEXT: .LBB49_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a6, .LCPI50_0 +; XTENSA-NEXT: j .LBB50_2 +; XTENSA-NEXT: .LBB50_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB50_4 +; XTENSA-NEXT: .LBB50_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB50_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB50_1 +; XTENSA-NEXT: .LBB50_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; 
XTENSA-ATOMIC-NEXT: j .LBB50_2 +; XTENSA-ATOMIC-NEXT: .LBB50_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB50_6 +; XTENSA-ATOMIC-NEXT: .LBB50_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB50_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB50_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB50_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB50_1 +; XTENSA-ATOMIC-NEXT: .LBB50_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a6, .LCPI51_0 +; XTENSA-NEXT: j .LBB51_2 +; XTENSA-NEXT: .LBB51_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB51_4 +; XTENSA-NEXT: .LBB51_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB51_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB51_1 +; XTENSA-NEXT: .LBB51_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB51_2 +; XTENSA-ATOMIC-NEXT: .LBB51_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, 
.LBB51_6 +; XTENSA-ATOMIC-NEXT: .LBB51_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB51_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB51_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB51_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB51_1 +; XTENSA-ATOMIC-NEXT: .LBB51_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a3, .LCPI52_0 +; XTENSA-NEXT: j .LBB52_2 +; XTENSA-NEXT: .LBB52_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB52_4 +; XTENSA-NEXT: .LBB52_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bltu a9, a8, .LBB52_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB52_1 +; XTENSA-NEXT: .LBB52_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB52_2 +; XTENSA-ATOMIC-NEXT: .LBB52_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB52_6 +; 
XTENSA-ATOMIC-NEXT: .LBB52_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB52_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB52_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB52_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB52_1 +; XTENSA-ATOMIC-NEXT: .LBB52_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a3, .LCPI53_0 +; XTENSA-NEXT: j .LBB53_2 +; XTENSA-NEXT: .LBB53_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB53_4 +; XTENSA-NEXT: .LBB53_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bltu a9, a8, .LBB53_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB53_1 +; XTENSA-NEXT: .LBB53_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB53_2 +; XTENSA-ATOMIC-NEXT: .LBB53_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB53_6 +; XTENSA-ATOMIC-NEXT: .LBB53_2: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB53_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB53_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB53_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB53_1 +; XTENSA-ATOMIC-NEXT: .LBB53_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a6, .LCPI54_0 +; XTENSA-NEXT: j .LBB54_2 +; XTENSA-NEXT: .LBB54_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB54_4 +; XTENSA-NEXT: .LBB54_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB54_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB54_1 +; XTENSA-NEXT: .LBB54_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB54_2 +; XTENSA-ATOMIC-NEXT: .LBB54_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB54_6 +; XTENSA-ATOMIC-NEXT: .LBB54_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; 
XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB54_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB54_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB54_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB54_1 +; XTENSA-ATOMIC-NEXT: .LBB54_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI55_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI55_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB55_2 +; XTENSA-ATOMIC-NEXT: .LBB55_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB55_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB55_4 +; XTENSA-ATOMIC-NEXT: .LBB55_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB55_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB55_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB55_1 +; XTENSA-ATOMIC-NEXT: .LBB55_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI56_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_xchg_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI56_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB56_2 +; XTENSA-ATOMIC-NEXT: .LBB56_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB56_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB56_4 +; XTENSA-ATOMIC-NEXT: .LBB56_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB56_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB56_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB56_1 +; XTENSA-ATOMIC-NEXT: .LBB56_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI57_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI57_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB57_2 +; XTENSA-ATOMIC-NEXT: .LBB57_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB57_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB57_4 +; XTENSA-ATOMIC-NEXT: .LBB57_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB57_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB57_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB57_1 +; 
XTENSA-ATOMIC-NEXT: .LBB57_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI58_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI58_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB58_2 +; XTENSA-ATOMIC-NEXT: .LBB58_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB58_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB58_4 +; XTENSA-ATOMIC-NEXT: .LBB58_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB58_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB58_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB58_1 +; XTENSA-ATOMIC-NEXT: .LBB58_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI59_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI59_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB59_2 +; XTENSA-ATOMIC-NEXT: .LBB59_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB59_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB59_4 +; XTENSA-ATOMIC-NEXT: .LBB59_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB59_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB59_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB59_1 +; XTENSA-ATOMIC-NEXT: .LBB59_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI60_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI60_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB60_2 +; XTENSA-ATOMIC-NEXT: .LBB60_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB60_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB60_4 +; XTENSA-ATOMIC-NEXT: .LBB60_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB60_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB60_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB60_1 +; XTENSA-ATOMIC-NEXT: .LBB60_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI61_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI61_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; 
XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB61_2 +; XTENSA-ATOMIC-NEXT: .LBB61_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB61_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB61_4 +; XTENSA-ATOMIC-NEXT: .LBB61_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB61_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB61_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB61_1 +; XTENSA-ATOMIC-NEXT: .LBB61_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI62_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI62_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB62_2 +; XTENSA-ATOMIC-NEXT: .LBB62_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB62_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB62_4 +; XTENSA-ATOMIC-NEXT: .LBB62_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB62_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB62_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB62_1 +; XTENSA-ATOMIC-NEXT: .LBB62_4: # 
%atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI63_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI63_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB63_2 +; XTENSA-ATOMIC-NEXT: .LBB63_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB63_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB63_4 +; XTENSA-ATOMIC-NEXT: .LBB63_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB63_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB63_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB63_1 +; XTENSA-ATOMIC-NEXT: .LBB63_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI64_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI64_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB64_2 +; XTENSA-ATOMIC-NEXT: .LBB64_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in 
Loop: Header=BB64_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB64_4 +; XTENSA-ATOMIC-NEXT: .LBB64_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB64_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB64_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB64_1 +; XTENSA-ATOMIC-NEXT: .LBB64_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI65_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI65_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB65_2 +; XTENSA-ATOMIC-NEXT: .LBB65_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB65_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB65_4 +; XTENSA-ATOMIC-NEXT: .LBB65_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB65_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB65_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB65_1 +; XTENSA-ATOMIC-NEXT: .LBB65_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI66_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry 
a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI66_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB66_2 +; XTENSA-ATOMIC-NEXT: .LBB66_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB66_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB66_4 +; XTENSA-ATOMIC-NEXT: .LBB66_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB66_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB66_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB66_1 +; XTENSA-ATOMIC-NEXT: .LBB66_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI67_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI67_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB67_2 +; XTENSA-ATOMIC-NEXT: .LBB67_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB67_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB67_4 +; XTENSA-ATOMIC-NEXT: .LBB67_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB67_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB67_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or 
a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB67_1 +; XTENSA-ATOMIC-NEXT: .LBB67_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI68_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI68_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB68_2 +; XTENSA-ATOMIC-NEXT: .LBB68_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB68_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB68_4 +; XTENSA-ATOMIC-NEXT: .LBB68_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB68_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB68_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB68_1 +; XTENSA-ATOMIC-NEXT: .LBB68_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI69_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI69_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB69_2 
+; XTENSA-ATOMIC-NEXT: .LBB69_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB69_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB69_4 +; XTENSA-ATOMIC-NEXT: .LBB69_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB69_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB69_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB69_1 +; XTENSA-ATOMIC-NEXT: .LBB69_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI70_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI70_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB70_2 +; XTENSA-ATOMIC-NEXT: .LBB70_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB70_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB70_4 +; XTENSA-ATOMIC-NEXT: .LBB70_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB70_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB70_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB70_1 +; XTENSA-ATOMIC-NEXT: .LBB70_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI71_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: 
entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI71_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB71_2 +; XTENSA-ATOMIC-NEXT: .LBB71_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB71_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB71_4 +; XTENSA-ATOMIC-NEXT: .LBB71_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB71_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB71_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB71_1 +; XTENSA-ATOMIC-NEXT: .LBB71_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI72_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI72_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB72_2 +; XTENSA-ATOMIC-NEXT: .LBB72_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB72_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB72_4 +; XTENSA-ATOMIC-NEXT: .LBB72_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB72_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB72_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB72_1 +; XTENSA-ATOMIC-NEXT: .LBB72_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; 
XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI73_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI73_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB73_2 +; XTENSA-ATOMIC-NEXT: .LBB73_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB73_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB73_4 +; XTENSA-ATOMIC-NEXT: .LBB73_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB73_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB73_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB73_1 +; XTENSA-ATOMIC-NEXT: .LBB73_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI74_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI74_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB74_2 +; XTENSA-ATOMIC-NEXT: .LBB74_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB74_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; 
XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB74_4 +; XTENSA-ATOMIC-NEXT: .LBB74_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB74_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB74_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB74_1 +; XTENSA-ATOMIC-NEXT: .LBB74_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI75_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI75_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB75_2 +; XTENSA-ATOMIC-NEXT: .LBB75_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB75_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB75_4 +; XTENSA-ATOMIC-NEXT: .LBB75_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB75_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB75_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB75_1 +; XTENSA-ATOMIC-NEXT: .LBB75_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI76_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI76_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; 
XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB76_2 +; XTENSA-ATOMIC-NEXT: .LBB76_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB76_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB76_4 +; XTENSA-ATOMIC-NEXT: .LBB76_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB76_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB76_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB76_1 +; XTENSA-ATOMIC-NEXT: .LBB76_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI77_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI77_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB77_2 +; XTENSA-ATOMIC-NEXT: .LBB77_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB77_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB77_4 +; XTENSA-ATOMIC-NEXT: .LBB77_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB77_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB77_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB77_1 +; 
XTENSA-ATOMIC-NEXT: .LBB77_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI78_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI78_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB78_2 +; XTENSA-ATOMIC-NEXT: .LBB78_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB78_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB78_4 +; XTENSA-ATOMIC-NEXT: .LBB78_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB78_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB78_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB78_1 +; XTENSA-ATOMIC-NEXT: .LBB78_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI79_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI79_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB79_2 +; 
XTENSA-ATOMIC-NEXT: .LBB79_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB79_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB79_4 +; XTENSA-ATOMIC-NEXT: .LBB79_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB79_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB79_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB79_1 +; XTENSA-ATOMIC-NEXT: .LBB79_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI80_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI80_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB80_2 +; XTENSA-ATOMIC-NEXT: .LBB80_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB80_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB80_4 +; XTENSA-ATOMIC-NEXT: .LBB80_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB80_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB80_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB80_1 +; XTENSA-ATOMIC-NEXT: .LBB80_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI81_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI81_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: 
movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB81_2 +; XTENSA-ATOMIC-NEXT: .LBB81_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB81_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB81_4 +; XTENSA-ATOMIC-NEXT: .LBB81_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB81_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB81_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB81_1 +; XTENSA-ATOMIC-NEXT: .LBB81_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI82_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI82_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB82_2 +; XTENSA-ATOMIC-NEXT: .LBB82_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB82_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB82_4 +; XTENSA-ATOMIC-NEXT: .LBB82_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB82_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB82_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB82_1 +; XTENSA-ATOMIC-NEXT: .LBB82_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI83_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_or_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI83_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB83_2 +; XTENSA-ATOMIC-NEXT: .LBB83_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB83_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB83_4 +; XTENSA-ATOMIC-NEXT: .LBB83_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB83_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB83_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB83_1 +; XTENSA-ATOMIC-NEXT: .LBB83_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI84_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI84_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB84_2 +; XTENSA-ATOMIC-NEXT: .LBB84_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB84_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB84_4 +; XTENSA-ATOMIC-NEXT: .LBB84_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB84_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB84_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB84_1 +; XTENSA-ATOMIC-NEXT: .LBB84_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_monotonic: +; 
XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI85_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI85_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB85_2 +; XTENSA-ATOMIC-NEXT: .LBB85_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB85_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB85_4 +; XTENSA-ATOMIC-NEXT: .LBB85_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB85_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB85_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB85_1 +; XTENSA-ATOMIC-NEXT: .LBB85_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI86_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI86_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB86_2 +; XTENSA-ATOMIC-NEXT: .LBB86_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB86_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB86_4 +; XTENSA-ATOMIC-NEXT: .LBB86_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB86_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB86_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB86_1 +; XTENSA-ATOMIC-NEXT: .LBB86_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; 
XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI87_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI87_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB87_2 +; XTENSA-ATOMIC-NEXT: .LBB87_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB87_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB87_4 +; XTENSA-ATOMIC-NEXT: .LBB87_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB87_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB87_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB87_1 +; XTENSA-ATOMIC-NEXT: .LBB87_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI88_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI88_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB88_2 +; XTENSA-ATOMIC-NEXT: .LBB88_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB88_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB88_4 +; XTENSA-ATOMIC-NEXT: .LBB88_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB88_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # 
in Loop: Header=BB88_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB88_1 +; XTENSA-ATOMIC-NEXT: .LBB88_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI89_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI89_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB89_2 +; XTENSA-ATOMIC-NEXT: .LBB89_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB89_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB89_4 +; XTENSA-ATOMIC-NEXT: .LBB89_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB89_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB89_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB89_1 +; XTENSA-ATOMIC-NEXT: .LBB89_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI90_0 +; XTENSA-NEXT: j .LBB90_2 +; XTENSA-NEXT: .LBB90_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB90_4 +; XTENSA-NEXT: .LBB90_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB90_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB90_1 +; XTENSA-NEXT: .LBB90_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; 
XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI90_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB90_2 +; XTENSA-ATOMIC-NEXT: .LBB90_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB90_6 +; XTENSA-ATOMIC-NEXT: .LBB90_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB90_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB90_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI90_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB90_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB90_1 +; XTENSA-ATOMIC-NEXT: .LBB90_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI91_0 +; XTENSA-NEXT: j .LBB91_2 +; XTENSA-NEXT: .LBB91_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB91_4 +; XTENSA-NEXT: .LBB91_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB91_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB91_1 +; XTENSA-NEXT: .LBB91_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; 
XTENSA-ATOMIC-NEXT: l32r a9, .LCPI91_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB91_2 +; XTENSA-ATOMIC-NEXT: .LBB91_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB91_6 +; XTENSA-ATOMIC-NEXT: .LBB91_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB91_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB91_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI91_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB91_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB91_1 +; XTENSA-ATOMIC-NEXT: .LBB91_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI92_0 +; XTENSA-NEXT: j .LBB92_2 +; XTENSA-NEXT: .LBB92_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB92_4 +; XTENSA-NEXT: .LBB92_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB92_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB92_1 +; XTENSA-NEXT: .LBB92_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi 
a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI92_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB92_2 +; XTENSA-ATOMIC-NEXT: .LBB92_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB92_6 +; XTENSA-ATOMIC-NEXT: .LBB92_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB92_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB92_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI92_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB92_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB92_1 +; XTENSA-ATOMIC-NEXT: .LBB92_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI93_0 +; XTENSA-NEXT: j .LBB93_2 +; XTENSA-NEXT: .LBB93_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB93_4 +; XTENSA-NEXT: .LBB93_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB93_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB93_1 +; XTENSA-NEXT: .LBB93_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; 
XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI93_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB93_2 +; XTENSA-ATOMIC-NEXT: .LBB93_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB93_6 +; XTENSA-ATOMIC-NEXT: .LBB93_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB93_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB93_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI93_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB93_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB93_1 +; XTENSA-ATOMIC-NEXT: .LBB93_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI94_0 +; XTENSA-NEXT: j .LBB94_2 +; XTENSA-NEXT: .LBB94_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB94_4 +; XTENSA-NEXT: .LBB94_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB94_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB94_1 +; XTENSA-NEXT: .LBB94_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: 
movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI94_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB94_2 +; XTENSA-ATOMIC-NEXT: .LBB94_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB94_6 +; XTENSA-ATOMIC-NEXT: .LBB94_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB94_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB94_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI94_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB94_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB94_1 +; XTENSA-ATOMIC-NEXT: .LBB94_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI95_0 +; XTENSA-NEXT: j .LBB95_2 +; XTENSA-NEXT: .LBB95_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB95_4 +; XTENSA-NEXT: .LBB95_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB95_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB95_1 +; XTENSA-NEXT: .LBB95_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; 
XTENSA-ATOMIC-NEXT: l32r a9, .LCPI95_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB95_2 +; XTENSA-ATOMIC-NEXT: .LBB95_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB95_6 +; XTENSA-ATOMIC-NEXT: .LBB95_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB95_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB95_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI95_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB95_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB95_1 +; XTENSA-ATOMIC-NEXT: .LBB95_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI96_0 +; XTENSA-NEXT: j .LBB96_2 +; XTENSA-NEXT: .LBB96_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB96_4 +; XTENSA-NEXT: .LBB96_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB96_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB96_1 +; XTENSA-NEXT: .LBB96_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI96_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; 
XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB96_2 +; XTENSA-ATOMIC-NEXT: .LBB96_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB96_6 +; XTENSA-ATOMIC-NEXT: .LBB96_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB96_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB96_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI96_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB96_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB96_1 +; XTENSA-ATOMIC-NEXT: .LBB96_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI97_0 +; XTENSA-NEXT: j .LBB97_2 +; XTENSA-NEXT: .LBB97_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB97_4 +; XTENSA-NEXT: .LBB97_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB97_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB97_1 +; XTENSA-NEXT: .LBB97_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI97_0 +; 
XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB97_2 +; XTENSA-ATOMIC-NEXT: .LBB97_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB97_6 +; XTENSA-ATOMIC-NEXT: .LBB97_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB97_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB97_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI97_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB97_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB97_1 +; XTENSA-ATOMIC-NEXT: .LBB97_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI98_0 +; XTENSA-NEXT: j .LBB98_2 +; XTENSA-NEXT: .LBB98_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB98_4 +; XTENSA-NEXT: .LBB98_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB98_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB98_1 +; XTENSA-NEXT: .LBB98_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, 
a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI98_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB98_2 +; XTENSA-ATOMIC-NEXT: .LBB98_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB98_6 +; XTENSA-ATOMIC-NEXT: .LBB98_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB98_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB98_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI98_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB98_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB98_1 +; XTENSA-ATOMIC-NEXT: .LBB98_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI99_0 +; XTENSA-NEXT: j .LBB99_2 +; XTENSA-NEXT: .LBB99_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB99_4 +; XTENSA-NEXT: .LBB99_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB99_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB99_1 +; XTENSA-NEXT: .LBB99_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI99_0 +; 
XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB99_2 +; XTENSA-ATOMIC-NEXT: .LBB99_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB99_6 +; XTENSA-ATOMIC-NEXT: .LBB99_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB99_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB99_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI99_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB99_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB99_1 +; XTENSA-ATOMIC-NEXT: .LBB99_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI100_1 +; XTENSA-NEXT: j .LBB100_2 +; XTENSA-NEXT: .LBB100_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB100_4 +; XTENSA-NEXT: .LBB100_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI100_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB100_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB100_1 +; XTENSA-NEXT: .LBB100_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI100_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; 
XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB100_2 +; XTENSA-ATOMIC-NEXT: .LBB100_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB100_6 +; XTENSA-ATOMIC-NEXT: .LBB100_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI100_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB100_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB100_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB100_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB100_1 +; XTENSA-ATOMIC-NEXT: .LBB100_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI101_1 +; XTENSA-NEXT: j .LBB101_2 +; XTENSA-NEXT: .LBB101_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB101_4 +; XTENSA-NEXT: .LBB101_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI101_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB101_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB101_1 +; XTENSA-NEXT: .LBB101_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI101_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 
+; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB101_2 +; XTENSA-ATOMIC-NEXT: .LBB101_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB101_6 +; XTENSA-ATOMIC-NEXT: .LBB101_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI101_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB101_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB101_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB101_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB101_1 +; XTENSA-ATOMIC-NEXT: .LBB101_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI102_1 +; XTENSA-NEXT: j .LBB102_2 +; XTENSA-NEXT: .LBB102_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB102_4 +; XTENSA-NEXT: .LBB102_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI102_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB102_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB102_1 +; XTENSA-NEXT: .LBB102_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI102_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB102_2 +; 
XTENSA-ATOMIC-NEXT: .LBB102_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB102_6 +; XTENSA-ATOMIC-NEXT: .LBB102_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI102_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB102_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB102_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB102_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB102_1 +; XTENSA-ATOMIC-NEXT: .LBB102_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI103_1 +; XTENSA-NEXT: j .LBB103_2 +; XTENSA-NEXT: .LBB103_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB103_4 +; XTENSA-NEXT: .LBB103_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI103_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB103_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB103_1 +; XTENSA-NEXT: .LBB103_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI103_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB103_2 +; XTENSA-ATOMIC-NEXT: .LBB103_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or 
a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB103_6 +; XTENSA-ATOMIC-NEXT: .LBB103_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI103_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB103_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB103_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB103_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB103_1 +; XTENSA-ATOMIC-NEXT: .LBB103_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI104_1 +; XTENSA-NEXT: j .LBB104_2 +; XTENSA-NEXT: .LBB104_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB104_4 +; XTENSA-NEXT: .LBB104_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI104_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB104_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB104_1 +; XTENSA-NEXT: .LBB104_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI104_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB104_2 +; XTENSA-ATOMIC-NEXT: .LBB104_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB104_6 +; XTENSA-ATOMIC-NEXT: .LBB104_2: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI104_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB104_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB104_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB104_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB104_1 +; XTENSA-ATOMIC-NEXT: .LBB104_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI105_1 +; XTENSA-NEXT: j .LBB105_2 +; XTENSA-NEXT: .LBB105_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB105_4 +; XTENSA-NEXT: .LBB105_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI105_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB105_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB105_1 +; XTENSA-NEXT: .LBB105_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI105_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB105_2 +; XTENSA-ATOMIC-NEXT: .LBB105_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB105_6 +; XTENSA-ATOMIC-NEXT: .LBB105_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI105_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; 
XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB105_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB105_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB105_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB105_1 +; XTENSA-ATOMIC-NEXT: .LBB105_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI106_1 +; XTENSA-NEXT: j .LBB106_2 +; XTENSA-NEXT: .LBB106_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB106_4 +; XTENSA-NEXT: .LBB106_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI106_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB106_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB106_1 +; XTENSA-NEXT: .LBB106_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI106_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB106_2 +; XTENSA-ATOMIC-NEXT: .LBB106_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB106_6 +; XTENSA-ATOMIC-NEXT: .LBB106_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI106_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB106_4 +; 
XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB106_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB106_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB106_1 +; XTENSA-ATOMIC-NEXT: .LBB106_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI107_1 +; XTENSA-NEXT: j .LBB107_2 +; XTENSA-NEXT: .LBB107_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB107_4 +; XTENSA-NEXT: .LBB107_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI107_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB107_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB107_1 +; XTENSA-NEXT: .LBB107_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI107_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB107_2 +; XTENSA-ATOMIC-NEXT: .LBB107_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB107_6 +; XTENSA-ATOMIC-NEXT: .LBB107_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI107_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB107_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB107_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB107_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB107_1 +; XTENSA-ATOMIC-NEXT: .LBB107_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI108_1 +; XTENSA-NEXT: j .LBB108_2 +; XTENSA-NEXT: .LBB108_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB108_4 +; XTENSA-NEXT: .LBB108_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI108_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB108_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB108_1 +; XTENSA-NEXT: .LBB108_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI108_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB108_2 +; XTENSA-ATOMIC-NEXT: .LBB108_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB108_6 +; XTENSA-ATOMIC-NEXT: .LBB108_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI108_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB108_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB108_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB108_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB108_1 +; XTENSA-ATOMIC-NEXT: .LBB108_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI109_1 +; XTENSA-NEXT: j .LBB109_2 +; XTENSA-NEXT: .LBB109_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB109_4 +; XTENSA-NEXT: .LBB109_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI109_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB109_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB109_1 +; XTENSA-NEXT: .LBB109_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI109_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB109_2 +; XTENSA-ATOMIC-NEXT: .LBB109_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB109_6 +; XTENSA-ATOMIC-NEXT: .LBB109_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI109_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB109_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB109_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; 
XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB109_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB109_1 +; XTENSA-ATOMIC-NEXT: .LBB109_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI110_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB110_2 +; XTENSA-ATOMIC-NEXT: .LBB110_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB110_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB110_4 +; XTENSA-ATOMIC-NEXT: .LBB110_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB110_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB110_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB110_1 +; XTENSA-ATOMIC-NEXT: .LBB110_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI111_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB111_2 +; XTENSA-ATOMIC-NEXT: .LBB111_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB111_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB111_4 +; XTENSA-ATOMIC-NEXT: .LBB111_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB111_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB111_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB111_1 +; XTENSA-ATOMIC-NEXT: .LBB111_4: # %atomicrmw.end +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI112_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB112_2 +; XTENSA-ATOMIC-NEXT: .LBB112_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB112_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB112_4 +; XTENSA-ATOMIC-NEXT: .LBB112_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB112_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB112_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB112_1 +; XTENSA-ATOMIC-NEXT: .LBB112_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI113_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB113_2 +; XTENSA-ATOMIC-NEXT: .LBB113_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB113_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB113_4 +; XTENSA-ATOMIC-NEXT: .LBB113_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB113_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB113_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB113_1 +; XTENSA-ATOMIC-NEXT: .LBB113_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI114_0 +; XTENSA-NEXT: callx8 
a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB114_2 +; XTENSA-ATOMIC-NEXT: .LBB114_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB114_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB114_4 +; XTENSA-ATOMIC-NEXT: .LBB114_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB114_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB114_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB114_1 +; XTENSA-ATOMIC-NEXT: .LBB114_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI115_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB115_2 +; XTENSA-ATOMIC-NEXT: .LBB115_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB115_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB115_4 +; XTENSA-ATOMIC-NEXT: .LBB115_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB115_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB115_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB115_1 +; XTENSA-ATOMIC-NEXT: .LBB115_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI116_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB116_2 +; XTENSA-ATOMIC-NEXT: .LBB116_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB116_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: 
beqi a12, 1, .LBB116_4 +; XTENSA-ATOMIC-NEXT: .LBB116_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB116_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB116_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB116_1 +; XTENSA-ATOMIC-NEXT: .LBB116_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI117_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB117_2 +; XTENSA-ATOMIC-NEXT: .LBB117_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB117_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB117_4 +; XTENSA-ATOMIC-NEXT: .LBB117_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB117_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB117_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB117_1 +; XTENSA-ATOMIC-NEXT: .LBB117_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI118_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB118_2 +; XTENSA-ATOMIC-NEXT: .LBB118_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB118_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB118_4 +; XTENSA-ATOMIC-NEXT: .LBB118_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB118_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB118_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB118_1 +; XTENSA-ATOMIC-NEXT: .LBB118_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI119_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB119_2 +; XTENSA-ATOMIC-NEXT: .LBB119_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB119_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB119_4 +; XTENSA-ATOMIC-NEXT: .LBB119_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB119_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB119_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB119_1 +; XTENSA-ATOMIC-NEXT: .LBB119_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI120_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB120_2 +; XTENSA-ATOMIC-NEXT: .LBB120_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB120_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB120_4 +; XTENSA-ATOMIC-NEXT: .LBB120_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB120_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB120_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB120_1 +; XTENSA-ATOMIC-NEXT: .LBB120_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or 
a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI121_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB121_2 +; XTENSA-ATOMIC-NEXT: .LBB121_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB121_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB121_4 +; XTENSA-ATOMIC-NEXT: .LBB121_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB121_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB121_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB121_1 +; XTENSA-ATOMIC-NEXT: .LBB121_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI122_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB122_2 +; XTENSA-ATOMIC-NEXT: .LBB122_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB122_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB122_4 +; XTENSA-ATOMIC-NEXT: .LBB122_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB122_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB122_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB122_1 +; XTENSA-ATOMIC-NEXT: .LBB122_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI123_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB123_2 +; XTENSA-ATOMIC-NEXT: .LBB123_1: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB123_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB123_4 +; XTENSA-ATOMIC-NEXT: .LBB123_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB123_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB123_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB123_1 +; XTENSA-ATOMIC-NEXT: .LBB123_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI124_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB124_2 +; XTENSA-ATOMIC-NEXT: .LBB124_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB124_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB124_4 +; XTENSA-ATOMIC-NEXT: .LBB124_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB124_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB124_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB124_1 +; XTENSA-ATOMIC-NEXT: .LBB124_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI125_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB125_2 +; XTENSA-ATOMIC-NEXT: .LBB125_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB125_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB125_4 +; XTENSA-ATOMIC-NEXT: .LBB125_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB125_1 +; 
XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB125_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB125_1 +; XTENSA-ATOMIC-NEXT: .LBB125_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI126_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB126_2 +; XTENSA-ATOMIC-NEXT: .LBB126_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB126_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB126_4 +; XTENSA-ATOMIC-NEXT: .LBB126_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB126_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB126_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB126_1 +; XTENSA-ATOMIC-NEXT: .LBB126_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI127_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB127_2 +; XTENSA-ATOMIC-NEXT: .LBB127_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB127_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB127_4 +; XTENSA-ATOMIC-NEXT: .LBB127_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB127_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB127_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB127_1 +; XTENSA-ATOMIC-NEXT: .LBB127_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_acq_rel: +; XTENSA: # %bb.0: +; 
XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI128_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB128_2 +; XTENSA-ATOMIC-NEXT: .LBB128_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB128_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB128_4 +; XTENSA-ATOMIC-NEXT: .LBB128_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB128_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB128_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB128_1 +; XTENSA-ATOMIC-NEXT: .LBB128_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI129_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB129_2 +; XTENSA-ATOMIC-NEXT: .LBB129_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB129_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB129_4 +; XTENSA-ATOMIC-NEXT: .LBB129_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB129_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB129_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB129_1 +; XTENSA-ATOMIC-NEXT: .LBB129_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b seq_cst + ret i32 %res +} + +;define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b monotonic +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b acquire +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b release +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b acq_rel +; ret i32 %res +;} +; +;define i32 
@atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b seq_cst +; ret i32 %res +;} + +define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI130_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB130_2 +; XTENSA-ATOMIC-NEXT: .LBB130_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB130_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB130_4 +; XTENSA-ATOMIC-NEXT: .LBB130_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB130_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB130_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB130_1 +; XTENSA-ATOMIC-NEXT: .LBB130_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI131_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB131_2 +; XTENSA-ATOMIC-NEXT: .LBB131_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB131_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB131_4 +; XTENSA-ATOMIC-NEXT: .LBB131_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB131_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB131_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB131_1 +; XTENSA-ATOMIC-NEXT: .LBB131_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI132_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_or_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB132_2 +; XTENSA-ATOMIC-NEXT: .LBB132_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB132_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB132_4 +; XTENSA-ATOMIC-NEXT: .LBB132_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB132_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB132_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB132_1 +; XTENSA-ATOMIC-NEXT: .LBB132_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI133_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB133_2 +; XTENSA-ATOMIC-NEXT: .LBB133_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB133_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB133_4 +; XTENSA-ATOMIC-NEXT: .LBB133_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB133_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB133_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB133_1 +; XTENSA-ATOMIC-NEXT: .LBB133_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI134_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB134_2 +; XTENSA-ATOMIC-NEXT: .LBB134_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB134_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB134_4 +; XTENSA-ATOMIC-NEXT: 
.LBB134_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB134_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB134_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB134_1 +; XTENSA-ATOMIC-NEXT: .LBB134_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI135_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB135_2 +; XTENSA-ATOMIC-NEXT: .LBB135_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB135_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB135_4 +; XTENSA-ATOMIC-NEXT: .LBB135_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB135_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB135_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB135_1 +; XTENSA-ATOMIC-NEXT: .LBB135_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI136_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB136_2 +; XTENSA-ATOMIC-NEXT: .LBB136_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB136_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB136_4 +; XTENSA-ATOMIC-NEXT: .LBB136_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB136_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB136_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB136_1 +; XTENSA-ATOMIC-NEXT: .LBB136_4: # 
%atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI137_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB137_2 +; XTENSA-ATOMIC-NEXT: .LBB137_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB137_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB137_4 +; XTENSA-ATOMIC-NEXT: .LBB137_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB137_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB137_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB137_1 +; XTENSA-ATOMIC-NEXT: .LBB137_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI138_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB138_2 +; XTENSA-ATOMIC-NEXT: .LBB138_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB138_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB138_4 +; XTENSA-ATOMIC-NEXT: .LBB138_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB138_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB138_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB138_1 +; XTENSA-ATOMIC-NEXT: .LBB138_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI139_0 +; 
XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB139_2 +; XTENSA-ATOMIC-NEXT: .LBB139_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB139_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB139_4 +; XTENSA-ATOMIC-NEXT: .LBB139_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB139_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB139_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB139_1 +; XTENSA-ATOMIC-NEXT: .LBB139_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI140_0 +; XTENSA-NEXT: j .LBB140_2 +; XTENSA-NEXT: .LBB140_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB140_4 +; XTENSA-NEXT: .LBB140_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB140_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB140_1 +; XTENSA-NEXT: .LBB140_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB140_2 +; XTENSA-ATOMIC-NEXT: .LBB140_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB140_6 +; XTENSA-ATOMIC-NEXT: .LBB140_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB140_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB140_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB140_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; 
XTENSA-ATOMIC-NEXT: j .LBB140_1 +; XTENSA-ATOMIC-NEXT: .LBB140_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI141_0 +; XTENSA-NEXT: j .LBB141_2 +; XTENSA-NEXT: .LBB141_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB141_4 +; XTENSA-NEXT: .LBB141_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB141_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB141_1 +; XTENSA-NEXT: .LBB141_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB141_2 +; XTENSA-ATOMIC-NEXT: .LBB141_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB141_6 +; XTENSA-ATOMIC-NEXT: .LBB141_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB141_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB141_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB141_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB141_1 +; XTENSA-ATOMIC-NEXT: .LBB141_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI142_0 +; XTENSA-NEXT: j .LBB142_2 +; XTENSA-NEXT: .LBB142_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB142_4 +; XTENSA-NEXT: .LBB142_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge 
a3, a2, .LBB142_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB142_1 +; XTENSA-NEXT: .LBB142_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB142_2 +; XTENSA-ATOMIC-NEXT: .LBB142_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB142_6 +; XTENSA-ATOMIC-NEXT: .LBB142_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB142_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB142_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB142_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB142_1 +; XTENSA-ATOMIC-NEXT: .LBB142_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI143_0 +; XTENSA-NEXT: j .LBB143_2 +; XTENSA-NEXT: .LBB143_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB143_4 +; XTENSA-NEXT: .LBB143_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB143_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB143_1 +; XTENSA-NEXT: .LBB143_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB143_2 +; XTENSA-ATOMIC-NEXT: .LBB143_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB143_6 +; XTENSA-ATOMIC-NEXT: .LBB143_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB143_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB143_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB143_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB143_1 +; XTENSA-ATOMIC-NEXT: .LBB143_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI144_0 +; XTENSA-NEXT: j .LBB144_2 +; XTENSA-NEXT: .LBB144_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB144_4 +; XTENSA-NEXT: .LBB144_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB144_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB144_1 +; XTENSA-NEXT: .LBB144_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB144_2 +; XTENSA-ATOMIC-NEXT: .LBB144_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB144_6 +; XTENSA-ATOMIC-NEXT: .LBB144_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB144_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB144_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB144_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB144_1 +; XTENSA-ATOMIC-NEXT: .LBB144_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI145_0 +; XTENSA-NEXT: j 
.LBB145_2 +; XTENSA-NEXT: .LBB145_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB145_4 +; XTENSA-NEXT: .LBB145_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB145_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB145_1 +; XTENSA-NEXT: .LBB145_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB145_2 +; XTENSA-ATOMIC-NEXT: .LBB145_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB145_6 +; XTENSA-ATOMIC-NEXT: .LBB145_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB145_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB145_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB145_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB145_1 +; XTENSA-ATOMIC-NEXT: .LBB145_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI146_0 +; XTENSA-NEXT: j .LBB146_2 +; XTENSA-NEXT: .LBB146_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB146_4 +; XTENSA-NEXT: .LBB146_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB146_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB146_1 +; XTENSA-NEXT: .LBB146_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB146_2 +; XTENSA-ATOMIC-NEXT: .LBB146_1: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB146_6 +; XTENSA-ATOMIC-NEXT: .LBB146_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB146_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB146_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB146_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB146_1 +; XTENSA-ATOMIC-NEXT: .LBB146_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI147_0 +; XTENSA-NEXT: j .LBB147_2 +; XTENSA-NEXT: .LBB147_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB147_4 +; XTENSA-NEXT: .LBB147_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB147_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB147_1 +; XTENSA-NEXT: .LBB147_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB147_2 +; XTENSA-ATOMIC-NEXT: .LBB147_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB147_6 +; XTENSA-ATOMIC-NEXT: .LBB147_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB147_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB147_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB147_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB147_1 +; XTENSA-ATOMIC-NEXT: .LBB147_6: # %atomicrmw.end +; 
XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI148_0 +; XTENSA-NEXT: j .LBB148_2 +; XTENSA-NEXT: .LBB148_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB148_4 +; XTENSA-NEXT: .LBB148_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB148_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB148_1 +; XTENSA-NEXT: .LBB148_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB148_2 +; XTENSA-ATOMIC-NEXT: .LBB148_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB148_6 +; XTENSA-ATOMIC-NEXT: .LBB148_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB148_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB148_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB148_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB148_1 +; XTENSA-ATOMIC-NEXT: .LBB148_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI149_0 +; XTENSA-NEXT: j .LBB149_2 +; XTENSA-NEXT: .LBB149_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB149_4 +; XTENSA-NEXT: .LBB149_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB149_1 +; XTENSA-NEXT: # %bb.3: # 
%atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB149_1 +; XTENSA-NEXT: .LBB149_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB149_2 +; XTENSA-ATOMIC-NEXT: .LBB149_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB149_6 +; XTENSA-ATOMIC-NEXT: .LBB149_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB149_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB149_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB149_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB149_1 +; XTENSA-ATOMIC-NEXT: .LBB149_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI150_0 +; XTENSA-NEXT: j .LBB150_2 +; XTENSA-NEXT: .LBB150_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB150_4 +; XTENSA-NEXT: .LBB150_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB150_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB150_1 +; XTENSA-NEXT: .LBB150_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB150_2 +; XTENSA-ATOMIC-NEXT: .LBB150_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB150_6 +; XTENSA-ATOMIC-NEXT: .LBB150_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB150_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: 
.LBB150_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB150_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB150_1 +; XTENSA-ATOMIC-NEXT: .LBB150_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI151_0 +; XTENSA-NEXT: j .LBB151_2 +; XTENSA-NEXT: .LBB151_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB151_4 +; XTENSA-NEXT: .LBB151_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB151_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB151_1 +; XTENSA-NEXT: .LBB151_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB151_2 +; XTENSA-ATOMIC-NEXT: .LBB151_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB151_6 +; XTENSA-ATOMIC-NEXT: .LBB151_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB151_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB151_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB151_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB151_1 +; XTENSA-ATOMIC-NEXT: .LBB151_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI152_0 +; XTENSA-NEXT: j .LBB152_2 +; XTENSA-NEXT: .LBB152_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: 
Header=BB152_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB152_4 +; XTENSA-NEXT: .LBB152_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB152_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB152_1 +; XTENSA-NEXT: .LBB152_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB152_2 +; XTENSA-ATOMIC-NEXT: .LBB152_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB152_6 +; XTENSA-ATOMIC-NEXT: .LBB152_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB152_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB152_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB152_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB152_1 +; XTENSA-ATOMIC-NEXT: .LBB152_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI153_0 +; XTENSA-NEXT: j .LBB153_2 +; XTENSA-NEXT: .LBB153_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB153_4 +; XTENSA-NEXT: .LBB153_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB153_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB153_1 +; XTENSA-NEXT: .LBB153_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB153_2 +; XTENSA-ATOMIC-NEXT: .LBB153_1: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB153_6 +; XTENSA-ATOMIC-NEXT: .LBB153_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB153_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB153_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB153_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB153_1 +; XTENSA-ATOMIC-NEXT: .LBB153_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI154_0 +; XTENSA-NEXT: j .LBB154_2 +; XTENSA-NEXT: .LBB154_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB154_4 +; XTENSA-NEXT: .LBB154_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB154_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB154_1 +; XTENSA-NEXT: .LBB154_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB154_2 +; XTENSA-ATOMIC-NEXT: .LBB154_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB154_6 +; XTENSA-ATOMIC-NEXT: .LBB154_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB154_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB154_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB154_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB154_1 +; XTENSA-ATOMIC-NEXT: .LBB154_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: 
memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI155_0 +; XTENSA-NEXT: j .LBB155_2 +; XTENSA-NEXT: .LBB155_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB155_4 +; XTENSA-NEXT: .LBB155_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB155_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB155_1 +; XTENSA-NEXT: .LBB155_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB155_2 +; XTENSA-ATOMIC-NEXT: .LBB155_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB155_6 +; XTENSA-ATOMIC-NEXT: .LBB155_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB155_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB155_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB155_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB155_1 +; XTENSA-ATOMIC-NEXT: .LBB155_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI156_0 +; XTENSA-NEXT: j .LBB156_2 +; XTENSA-NEXT: .LBB156_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB156_4 +; XTENSA-NEXT: .LBB156_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB156_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB156_2 Depth=1 +; 
XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB156_1 +; XTENSA-NEXT: .LBB156_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB156_2 +; XTENSA-ATOMIC-NEXT: .LBB156_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB156_6 +; XTENSA-ATOMIC-NEXT: .LBB156_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB156_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB156_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB156_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB156_1 +; XTENSA-ATOMIC-NEXT: .LBB156_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI157_0 +; XTENSA-NEXT: j .LBB157_2 +; XTENSA-NEXT: .LBB157_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB157_4 +; XTENSA-NEXT: .LBB157_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB157_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB157_1 +; XTENSA-NEXT: .LBB157_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB157_2 +; XTENSA-ATOMIC-NEXT: .LBB157_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB157_6 +; XTENSA-ATOMIC-NEXT: .LBB157_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB157_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB157_4: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB157_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB157_1 +; XTENSA-ATOMIC-NEXT: .LBB157_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI158_0 +; XTENSA-NEXT: j .LBB158_2 +; XTENSA-NEXT: .LBB158_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB158_4 +; XTENSA-NEXT: .LBB158_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB158_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB158_1 +; XTENSA-NEXT: .LBB158_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB158_2 +; XTENSA-ATOMIC-NEXT: .LBB158_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB158_6 +; XTENSA-ATOMIC-NEXT: .LBB158_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB158_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB158_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB158_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB158_1 +; XTENSA-ATOMIC-NEXT: .LBB158_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI159_0 +; XTENSA-NEXT: j .LBB159_2 +; XTENSA-NEXT: .LBB159_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB159_2 
Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB159_4 +; XTENSA-NEXT: .LBB159_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB159_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB159_1 +; XTENSA-NEXT: .LBB159_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB159_2 +; XTENSA-ATOMIC-NEXT: .LBB159_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB159_6 +; XTENSA-ATOMIC-NEXT: .LBB159_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB159_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB159_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB159_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB159_1 +; XTENSA-ATOMIC-NEXT: .LBB159_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b seq_cst + ret i32 %res +} diff --git a/llvm/test/CodeGen/Xtensa/forced-atomics.ll b/llvm/test/CodeGen/Xtensa/forced-atomics.ll new file mode 100644 index 0000000..eeec87b --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/forced-atomics.ll @@ -0,0 +1,1426 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i -mattr=+forced-atomics < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @load8(ptr %p) nounwind { +; XTENSA-LABEL: load8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i8, ptr %p seq_cst, align 1 + ret i8 %v +} + +define void @store8(ptr %p) nounwind { +; XTENSA-LABEL: store8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store8: +; XTENSA-ATOMIC: 
# %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s8i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 0, ptr %p seq_cst, align 1 + ret void +} + +define i8 @rmw8(ptr %p) nounwind { +; XTENSA-LABEL: rmw8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: j .LBB2_2 +; XTENSA-ATOMIC-NEXT: .LBB2_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB2_4 +; XTENSA-ATOMIC-NEXT: .LBB2_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: add a6, a15, a10 +; XTENSA-ATOMIC-NEXT: and a6, a6, a11 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: or a6, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB2_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB2_1 +; XTENSA-ATOMIC-NEXT: .LBB2_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i8 1 seq_cst, align 1 + ret i8 %v +} + +define i8 @cmpxchg8(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a8, 0 +; XTENSA-NEXT: s8i a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: movi a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a10, 0 +; XTENSA-ATOMIC-NEXT: and a7, a11, a9 +; XTENSA-ATOMIC-NEXT: movi a11, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a12, a11 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: .LBB3_1: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: 
Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: or a14, a15, a12 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a11, a11 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB3_3 +; XTENSA-ATOMIC-NEXT: # %bb.2: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: .LBB3_3: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: bnez a7, .LBB3_5 +; XTENSA-ATOMIC-NEXT: # %bb.4: # %partword.cmpxchg.failure +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: bne a15, a7, .LBB3_1 +; XTENSA-ATOMIC-NEXT: .LBB3_5: # %partword.cmpxchg.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i8 0, i8 1 seq_cst seq_cst + %res.0 = extractvalue { i8, i1 } %res, 0 + ret i8 %res.0 +} + +define i16 @load16(ptr %p) nounwind { +; XTENSA-LABEL: load16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i16, ptr %p seq_cst, align 2 + ret i16 %v +} + +define void @store16(ptr %p) nounwind { +; XTENSA-LABEL: store16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s16i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 0, ptr %p seq_cst, align 2 + ret void +} + +define i16 @rmw16(ptr %p) nounwind { +; XTENSA-LABEL: rmw16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI6_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: j .LBB6_2 +; XTENSA-ATOMIC-NEXT: .LBB6_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB6_4 +; XTENSA-ATOMIC-NEXT: .LBB6_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; 
XTENSA-ATOMIC-NEXT: add a6, a15, a10 +; XTENSA-ATOMIC-NEXT: and a6, a6, a11 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: or a6, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB6_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB6_1 +; XTENSA-ATOMIC-NEXT: .LBB6_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i16 1 seq_cst, align 2 + ret i16 %v +} + +define i16 @cmpxchg16(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a8, 0 +; XTENSA-NEXT: s16i a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: movi a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI7_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a10, 0 +; XTENSA-ATOMIC-NEXT: and a7, a11, a9 +; XTENSA-ATOMIC-NEXT: movi a11, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a12, a11 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: .LBB7_1: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: or a14, a15, a12 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a11, a11 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB7_3 +; XTENSA-ATOMIC-NEXT: # %bb.2: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: .LBB7_3: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: bnez a7, .LBB7_5 +; XTENSA-ATOMIC-NEXT: # %bb.4: # %partword.cmpxchg.failure +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: bne a15, a7, .LBB7_1 +; XTENSA-ATOMIC-NEXT: .LBB7_5: # %partword.cmpxchg.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i16 0, i16 1 seq_cst seq_cst + %res.0 = extractvalue { i16, i1 } %res, 0 + ret i16 %res.0 +} + +define i32 @load32_unordered(ptr %p) nounwind { +; XTENSA-LABEL: load32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p unordered, align 4 + ret i32 %v +} + 
+define i32 @load32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: load32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p monotonic, align 4 + ret i32 %v +} + +define i32 @load32_acquire(ptr %p) nounwind { +; XTENSA-LABEL: load32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p acquire, align 4 + ret i32 %v +} + +define i32 @load32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: load32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p seq_cst, align 4 + ret i32 %v +} + +define void @store32_unordered(ptr %p) nounwind { +; XTENSA-LABEL: store32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: or a12, a11, a11 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 0, ptr %p unordered, align 4 + ret void +} + +define void @store32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: store32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: or a12, a11, a11 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 0, ptr %p monotonic, align 4 + ret void +} + +define void @store32_release(ptr %p) nounwind { +; XTENSA-LABEL: store32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 0, ptr %p release, align 4 + ret void +} + +define void @store32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: store32_seq_cst: +; XTENSA: # %bb.0: +; 
XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 0, ptr %p seq_cst, align 4 + ret void +} + +define i32 @rmw32_add_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_add_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_add_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB16_2 +; XTENSA-ATOMIC-NEXT: .LBB16_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB16_4 +; XTENSA-ATOMIC-NEXT: .LBB16_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: addi a8, a11, 1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB16_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB16_1 +; XTENSA-ATOMIC-NEXT: .LBB16_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i32 1 monotonic, align 4 + ret i32 %v +} + +define i32 @rmw32_add_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_add_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_add_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB17_2 +; XTENSA-ATOMIC-NEXT: .LBB17_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB17_4 +; XTENSA-ATOMIC-NEXT: .LBB17_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: addi a8, a11, 1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB17_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB17_1 +; XTENSA-ATOMIC-NEXT: .LBB17_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_sub_seq_cst(ptr %p) nounwind { +; 
XTENSA-LABEL: rmw32_sub_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_sub_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB18_2 +; XTENSA-ATOMIC-NEXT: .LBB18_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB18_4 +; XTENSA-ATOMIC-NEXT: .LBB18_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: addi a8, a11, -1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB18_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB18_1 +; XTENSA-ATOMIC-NEXT: .LBB18_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw sub ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_and_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_and_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_and_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB19_2 +; XTENSA-ATOMIC-NEXT: .LBB19_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB19_4 +; XTENSA-ATOMIC-NEXT: .LBB19_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a9 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB19_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB19_1 +; XTENSA-ATOMIC-NEXT: .LBB19_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw and ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_nand_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_nand_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_nand_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, -1 +; XTENSA-ATOMIC-NEXT: 
movi a10, -2 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB20_2 +; XTENSA-ATOMIC-NEXT: .LBB20_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a14, 1, .LBB20_4 +; XTENSA-ATOMIC-NEXT: .LBB20_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a13, a9 +; XTENSA-ATOMIC-NEXT: or a8, a8, a10 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a14, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a8, a13, .LBB20_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB20_1 +; XTENSA-ATOMIC-NEXT: .LBB20_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw nand ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_or_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_or_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_or_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB21_2 +; XTENSA-ATOMIC-NEXT: .LBB21_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB21_4 +; XTENSA-ATOMIC-NEXT: .LBB21_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a9 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB21_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB21_1 +; XTENSA-ATOMIC-NEXT: .LBB21_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw or ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_xor_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_xor_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_xor_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB22_2 +; XTENSA-ATOMIC-NEXT: .LBB22_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB22_4 +; XTENSA-ATOMIC-NEXT: .LBB22_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a9 +; XTENSA-ATOMIC-NEXT: wsr 
a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB22_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB22_1 +; XTENSA-ATOMIC-NEXT: .LBB22_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw xor ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_max_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_max_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a5, 1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI23_0 +; XTENSA-NEXT: j .LBB23_2 +; XTENSA-NEXT: .LBB23_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB23_4 +; XTENSA-NEXT: .LBB23_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: bge a5, a2, .LBB23_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB23_1 +; XTENSA-NEXT: .LBB23_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_max_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB23_2 +; XTENSA-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB23_6 +; XTENSA-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: bge a9, a11, .LBB23_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB23_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB23_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB23_1 +; XTENSA-ATOMIC-NEXT: .LBB23_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw max ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_min_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_min_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a12, a2, 0 +; XTENSA-NEXT: movi a6, 1 +; XTENSA-NEXT: movi a5, 2 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI24_0 +; XTENSA-NEXT: j .LBB24_2 +; XTENSA-NEXT: .LBB24_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; 
XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a12, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB24_4 +; XTENSA-NEXT: .LBB24_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a12, a1, 0 +; XTENSA-NEXT: blt a12, a5, .LBB24_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-NEXT: or a12, a6, a6 +; XTENSA-NEXT: j .LBB24_1 +; XTENSA-NEXT: .LBB24_4: # %atomicrmw.end +; XTENSA-NEXT: or a2, a12, a12 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_min_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a12, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 2 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: or a8, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB24_2 +; XTENSA-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a13, 1, .LBB24_6 +; XTENSA-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: blt a12, a10, .LBB24_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: .LBB24_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a12, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a13, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a12, .LBB24_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB24_1 +; XTENSA-ATOMIC-NEXT: .LBB24_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw min ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_umax_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_umax_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a5, 1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI25_0 +; XTENSA-NEXT: j .LBB25_2 +; XTENSA-NEXT: .LBB25_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB25_4 +; XTENSA-NEXT: .LBB25_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: bgeu a5, a2, .LBB25_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB25_1 +; XTENSA-NEXT: .LBB25_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_umax_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB25_2 +; XTENSA-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB25_6 
+; XTENSA-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: bgeu a9, a11, .LBB25_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB25_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB25_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB25_1 +; XTENSA-ATOMIC-NEXT: .LBB25_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw umax ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_umin_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_umin_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a12, a2, 0 +; XTENSA-NEXT: movi a6, 1 +; XTENSA-NEXT: movi a5, 2 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI26_0 +; XTENSA-NEXT: j .LBB26_2 +; XTENSA-NEXT: .LBB26_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a12, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB26_4 +; XTENSA-NEXT: .LBB26_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a12, a1, 0 +; XTENSA-NEXT: bltu a12, a5, .LBB26_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-NEXT: or a12, a6, a6 +; XTENSA-NEXT: j .LBB26_1 +; XTENSA-NEXT: .LBB26_4: # %atomicrmw.end +; XTENSA-NEXT: or a2, a12, a12 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_umin_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a12, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 2 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: or a8, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB26_2 +; XTENSA-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a13, 1, .LBB26_6 +; XTENSA-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: bltu a12, a10, .LBB26_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: .LBB26_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a12, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a13, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a12, .LBB26_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB26_1 +; XTENSA-ATOMIC-NEXT: .LBB26_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw umin ptr %p, i32 1 seq_cst, align 4 + ret i32 
%v +} + +define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_xchg_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI27_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_xchg_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB27_2 +; XTENSA-ATOMIC-NEXT: .LBB27_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB27_4 +; XTENSA-ATOMIC-NEXT: .LBB27_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB27_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB27_1 +; XTENSA-ATOMIC-NEXT: .LBB27_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw xchg ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define float @rmw32_fadd_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fadd_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a10, a2, 0 +; XTENSA-NEXT: l32r a6, .LCPI28_1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI28_2 +; XTENSA-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a10, a1, 0 +; XTENSA-NEXT: l32r a11, .LCPI28_0 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: or a12, a10, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: l32i a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB28_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_fadd_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI28_1 +; XTENSA-ATOMIC-NEXT: movi a5, 0 +; XTENSA-ATOMIC-NEXT: movi a4, 1 +; XTENSA-ATOMIC-NEXT: j .LBB28_2 +; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a10, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB28_4 +; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI28_0 +; XTENSA-ATOMIC-NEXT: or a10, a7, a7 +; XTENSA-ATOMIC-NEXT: callx8 a6 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0 +; XTENSA-ATOMIC-NEXT: or a8, a4, a4 +; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB28_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a5, a5 +; XTENSA-ATOMIC-NEXT: j .LBB28_1 +; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a10, 
a10 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw fadd ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float @rmw32_fsub_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fsub_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a10, a2, 0 +; XTENSA-NEXT: l32r a6, .LCPI29_1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI29_2 +; XTENSA-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a10, a1, 0 +; XTENSA-NEXT: l32r a11, .LCPI29_0 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: or a12, a10, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: l32i a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB29_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_fsub_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI29_1 +; XTENSA-ATOMIC-NEXT: movi a5, 0 +; XTENSA-ATOMIC-NEXT: movi a4, 1 +; XTENSA-ATOMIC-NEXT: j .LBB29_2 +; XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a10, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB29_4 +; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI29_0 +; XTENSA-ATOMIC-NEXT: or a10, a7, a7 +; XTENSA-ATOMIC-NEXT: callx8 a6 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0 +; XTENSA-ATOMIC-NEXT: or a8, a4, a4 +; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB29_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a5, a5 +; XTENSA-ATOMIC-NEXT: j .LBB29_1 +; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a10, a10 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw fsub ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float @rmw32_fmin_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fmin_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a10, a2, 0 +; XTENSA-NEXT: l32r a6, .LCPI30_1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI30_2 +; XTENSA-NEXT: .LBB30_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a10, a1, 0 +; XTENSA-NEXT: l32r a11, .LCPI30_0 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: or a12, a10, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: l32i a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB30_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_fmin_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI30_1 +; XTENSA-ATOMIC-NEXT: movi a5, 0 +; XTENSA-ATOMIC-NEXT: movi a4, 1 +; XTENSA-ATOMIC-NEXT: j .LBB30_2 +; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a10, a10 +; 
XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB30_4 +; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI30_0 +; XTENSA-ATOMIC-NEXT: or a10, a7, a7 +; XTENSA-ATOMIC-NEXT: callx8 a6 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0 +; XTENSA-ATOMIC-NEXT: or a8, a4, a4 +; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB30_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a5, a5 +; XTENSA-ATOMIC-NEXT: j .LBB30_1 +; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a10, a10 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw fmin ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float @rmw32_fmax_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fmax_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a10, a2, 0 +; XTENSA-NEXT: l32r a6, .LCPI31_1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI31_2 +; XTENSA-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a10, a1, 0 +; XTENSA-NEXT: l32r a11, .LCPI31_0 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: or a12, a10, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: l32i a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB31_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_fmax_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI31_1 +; XTENSA-ATOMIC-NEXT: movi a5, 0 +; XTENSA-ATOMIC-NEXT: movi a4, 1 +; XTENSA-ATOMIC-NEXT: j .LBB31_2 +; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a10, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB31_4 +; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI31_0 +; XTENSA-ATOMIC-NEXT: or a10, a7, a7 +; XTENSA-ATOMIC-NEXT: callx8 a6 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0 +; XTENSA-ATOMIC-NEXT: or a8, a4, a4 +; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB31_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a5, a5 +; XTENSA-ATOMIC-NEXT: j .LBB31_1 +; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a10, a10 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw fmax ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define i32 @cmpxchg32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a13, 0 +; XTENSA-NEXT: s32i a13, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: l32r a8, .LCPI32_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 1 +; 
XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: wsr a9, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic + %res.0 = extractvalue { i32, i1 } %res, 0 + ret i32 %res.0 +} + +define i32 @cmpxchg32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a8, 0 +; XTENSA-NEXT: s32i a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: movi a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI33_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 1 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: wsr a9, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i32 0, i32 1 seq_cst seq_cst + %res.0 = extractvalue { i32, i1 } %res, 0 + ret i32 %res.0 +} |