Diffstat (limited to 'llvm/test/CodeGen')
108 files changed, 23983 insertions, 3114 deletions
diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll index 113eb14..4db9db9 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll @@ -370,3 +370,175 @@ entry: %r = select i1 %c, i64 %a, i64 %ands ret i64 %r } + +; Test EOR. +define i32 @test1_eor(i32 %a) { +; CHECK-LABEL: test1_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: eor w8, w0, #0x400 +; CHECK-NEXT: eor w0, w8, #0x200000 +; CHECK-NEXT: ret +entry: + %eor = xor i32 %a, 2098176 + ret i32 %eor +} + +; This constant should not be split because it can be handled by one mov. +define i32 @test2_eor(i32 %a) { +; CHECK-LABEL: test2_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: eor w0, w0, w8 +; CHECK-NEXT: ret +entry: + %eor = xor i32 %a, 135 + ret i32 %eor +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. +define i32 @test3_eor(i32 %a) { +; CHECK-LABEL: test3_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: eor w0, w0, w8 +; CHECK-NEXT: ret +entry: + %eor = xor i32 %a, 2163712 + ret i32 %eor +} + +define i64 @test4_eor(i64 %a) { +; CHECK-LABEL: test4_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: eor x8, x0, #0x400 +; CHECK-NEXT: eor x0, x8, #0x200000 +; CHECK-NEXT: ret +entry: + %eor = xor i64 %a, 2098176 + ret i64 %eor +} + +define i64 @test5_eor(i64 %a) { +; CHECK-LABEL: test5_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: eor x8, x0, #0x4000 +; CHECK-NEXT: eor x0, x8, #0x200000000 +; CHECK-NEXT: ret +entry: + %eor = xor i64 %a, 8589950976 + ret i64 %eor +} + +; This constant should not be split because it can be handled by one mov. +define i64 @test6_eor(i64 %a) { +; CHECK-LABEL: test6_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: eor x0, x0, x8 +; CHECK-NEXT: ret +entry: + %eor = xor i64 %a, 135 + ret i64 %eor +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. +define i64 @test7_eor(i64 %a) { +; CHECK-LABEL: test7_eor: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: eor x0, x0, x8 +; CHECK-NEXT: ret +entry: + %eor = xor i64 %a, 2163712 + ret i64 %eor +} + +; Test ORR. +define i32 @test1_orr(i32 %a) { +; CHECK-LABEL: test1_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: orr w8, w0, #0x400 +; CHECK-NEXT: orr w0, w8, #0x200000 +; CHECK-NEXT: ret +entry: + %orr = or i32 %a, 2098176 + ret i32 %orr +} + +; This constant should not be split because it can be handled by one mov. +define i32 @test2_orr(i32 %a) { +; CHECK-LABEL: test2_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: orr w0, w0, w8 +; CHECK-NEXT: ret +entry: + %orr = or i32 %a, 135 + ret i32 %orr +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. 
+define i32 @test3_orr(i32 %a) { +; CHECK-LABEL: test3_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: orr w0, w0, w8 +; CHECK-NEXT: ret +entry: + %orr = or i32 %a, 2163712 + ret i32 %orr +} + +define i64 @test4_orr(i64 %a) { +; CHECK-LABEL: test4_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: orr x8, x0, #0x400 +; CHECK-NEXT: orr x0, x8, #0x200000 +; CHECK-NEXT: ret +entry: + %orr = or i64 %a, 2098176 + ret i64 %orr +} + +define i64 @test5_orr(i64 %a) { +; CHECK-LABEL: test5_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: orr x8, x0, #0x4000 +; CHECK-NEXT: orr x0, x8, #0x200000000 +; CHECK-NEXT: ret +entry: + %orr = or i64 %a, 8589950976 + ret i64 %orr +} + +; This constant should not be split because it can be handled by one mov. +define i64 @test6_orr(i64 %a) { +; CHECK-LABEL: test6_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #135 // =0x87 +; CHECK-NEXT: orr x0, x0, x8 +; CHECK-NEXT: ret +entry: + %orr = or i64 %a, 135 + ret i64 %orr +} + +; This constant should not be split because the split immediate is not valid +; bitmask immediate. +define i64 @test7_orr(i64 %a) { +; CHECK-LABEL: test7_orr: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #1024 // =0x400 +; CHECK-NEXT: movk w8, #33, lsl #16 +; CHECK-NEXT: orr x0, x0, x8 +; CHECK-NEXT: ret +entry: + %orr = or i64 %a, 2163712 + ret i64 %orr +} diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll index 7524782..02c76ba 100644 --- a/llvm/test/CodeGen/AArch64/abds-neg.ll +++ b/llvm/test/CodeGen/AArch64/abds-neg.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -26,8 +25,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -43,8 +41,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -60,8 +57,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -93,8 +89,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i16 %a to i64 diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll index bbdb116..bf52e71 100644 --- a/llvm/test/CodeGen/AArch64/abds.ll +++ b/llvm/test/CodeGen/AArch64/abds.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; 
CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -25,8 +24,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -41,8 +39,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 @@ -57,8 +54,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -88,8 +84,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -215,8 +210,7 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_minmax_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i8 @llvm.smin.i8(i8 %a, i8 %b) @@ -229,8 +223,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_minmax_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i16 @llvm.smin.i16(i16 %a, i16 %b) @@ -287,8 +280,7 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_cmp_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sgt i8 %a, %b @@ -302,8 +294,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_cmp_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sge i16 %a, %b @@ -508,9 +499,8 @@ define i64 @vector_legalized(i16 %a, i16 %b) { ; CHECK: // %bb.0: ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: addp d0, v0.2d -; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: cneg w8, w8, mi ; CHECK-NEXT: fmov x9, d0 ; CHECK-NEXT: add x0, x9, x8 @@ -533,8 +523,7 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_select_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp slt i8 %a, %b @@ -548,8 +537,7 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: 
abd_select_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w8, w8, w1, sxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, sxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp sle i16 %a, %b diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll index d07f099a..400031b 100644 --- a/llvm/test/CodeGen/AArch64/abdu-neg.ll +++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -26,8 +25,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -43,8 +41,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -60,8 +57,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i16 %a to i64 @@ -93,8 +89,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i16 %a to i64 diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll index 1045ee2..8d2b0b0 100644 --- a/llvm/test/CodeGen/AArch64/abdu.ll +++ b/llvm/test/CodeGen/AArch64/abdu.ll @@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -25,8 +24,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -41,8 +39,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 @@ -57,8 +54,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i16 %a to 
i64 @@ -88,8 +84,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i16 %a to i64 @@ -219,8 +214,7 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_minmax_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i8 @llvm.umin.i8(i8 %a, i8 %b) @@ -233,8 +227,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_minmax_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i16 @llvm.umin.i16(i16 %a, i16 %b) @@ -293,8 +286,7 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_cmp_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ugt i8 %a, %b @@ -308,8 +300,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_cmp_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp uge i16 %a, %b @@ -373,10 +364,9 @@ define i64 @vector_legalized(i16 %a, i16 %b) { ; CHECK: // %bb.0: ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: addp d0, v0.2d +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w8, w8, mi +; CHECK-NEXT: addp d0, v0.2d ; CHECK-NEXT: fmov x9, d0 ; CHECK-NEXT: add x0, x9, x8 ; CHECK-NEXT: ret @@ -398,8 +388,7 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_select_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: sub w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxtb ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ult i8 %a, %b @@ -413,8 +402,7 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_select_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w8, w8, w1, uxth -; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: subs w8, w8, w1, uxth ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %cmp = icmp ule i16 %a, %b diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll index 3a808f5..dd018a6 100644 --- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll +++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll @@ -11,7 +11,7 @@ define void @array_1D(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] @@ -34,7 +34,7 @@ define %my_subtype @array_1D_extract(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0, #1, mul vl] ; CHECK-NEXT: addvl sp, sp, #3 @@ -52,7 +52,7 @@ define void @array_1D_insert(ptr %addr, %my_subtype %elt) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] ; CHECK-NEXT: ldr z2, [x0] @@ -75,7 +75,7 @@ define void @array_2D(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-6 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x30, 0x1e, 0x22 // sp + 16 + 48 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #5, mul vl] diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll index e7d8f4f..be73dc9 100644 --- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll +++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll @@ -10,7 +10,7 @@ define void @test(ptr %addr) #0 { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ldr z1, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/arm64-ext.ll b/llvm/test/CodeGen/AArch64/arm64-ext.ll index 8bf2b82..c367057 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ext.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ext.ll @@ -139,9 +139,8 @@ define <2 x ptr> @test_v2p0(<2 x ptr> %a, <2 x ptr> %b) { define <16 x i8> @reverse_vector_s8x16b(<16 x i8> noundef %x) { ; CHECK-SD-LABEL: reverse_vector_s8x16b: ; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: rev64 v1.16b, v0.16b -; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8 -; CHECK-SD-NEXT: mov v0.d[1], v1.d[0] +; CHECK-SD-NEXT: rev64 v0.16b, v0.16b +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-SD-NEXT: ret ; ; CHECK-GI-LABEL: reverse_vector_s8x16b: @@ -161,9 +160,8 @@ entry: define <8 x i16> @reverse_vector_s16x8b(<8 x i16> noundef %x) { ; CHECK-SD-LABEL: reverse_vector_s16x8b: ; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: rev64 v1.8h, v0.8h -; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8 -; CHECK-SD-NEXT: mov v0.d[1], v1.d[0] +; CHECK-SD-NEXT: rev64 v0.8h, v0.8h +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-SD-NEXT: ret ; ; CHECK-GI-LABEL: reverse_vector_s16x8b: diff --git a/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll b/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll new file mode 100644 index 0000000..5036be9 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll @@ -0,0 +1,112 @@ +; RUN: llc -debug-only=isel -o /dev/null < %s 2>&1 | FileCheck %s + +; REQUIRES: asserts + +; These tests ensure that we don't combine +; CSEL a, b, cc, SUBS(SUB(x,y), 0) -> CSEL a, b, cc, SUBS(x,y) +; if the flags set by SUBS(SUB(x,y), 0) have more than one use. +; +; This restriction exists because combining SUBS(SUB(x,y), 0) -> SUBS(x,y) is +; only valid if there are no users of the overflow flags (C/V) generated by the +; SUBS. Currently, we only check the flags used by the CSEL, and therefore we +; conservatively reject cases where the SUBS's flags have other uses. 
+ +target triple = "aarch64-unknown-linux-gnu" + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'combine_subs:' +; CHECK-NEXT: SelectionDAG has 13 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t5: i32 = sub t2, t4 +; CHECK-NEXT: t14: i32,i32 = AArch64ISD::SUBS t5, Constant:i32<0> +; CHECK-NEXT: t16: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t14:1 +; CHECK-NEXT: t11: ch,glue = CopyToReg t0, Register:i32 $w0, t16 +; CHECK-NEXT: t12: ch = AArch64ISD::RET_GLUE t11, Register:i32 $w0, t11:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'combine_subs:' +; CHECK-NEXT: SelectionDAG has 11 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t18: i32,i32 = AArch64ISD::SUBS t2, t4 +; CHECK-NEXT: t16: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t18:1 +; CHECK-NEXT: t11: ch,glue = CopyToReg t0, Register:i32 $w0, t16 +; CHECK-NEXT: t12: ch = AArch64ISD::RET_GLUE t11, Register:i32 $w0, t11:1 + +define i32 @combine_subs(i32 %a, i32 %b) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + ret i32 %sel +} + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'combine_subs_multiple_sub_uses:' +; CHECK-NEXT: SelectionDAG has 14 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t5: i32 = sub t2, t4 +; CHECK-NEXT: t15: i32,i32 = AArch64ISD::SUBS t5, Constant:i32<0> +; CHECK-NEXT: t17: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t15:1 +; CHECK-NEXT: t10: i32 = add t17, t5 +; CHECK-NEXT: t12: ch,glue = CopyToReg t0, Register:i32 $w0, t10 +; CHECK-NEXT: t13: ch = AArch64ISD::RET_GLUE t12, Register:i32 $w0, t12:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'combine_subs_multiple_sub_uses:' +; CHECK-NEXT: SelectionDAG has 12 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t17: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t19:1 +; CHECK-NEXT: t10: i32 = add t17, t19 +; CHECK-NEXT: t12: ch,glue = CopyToReg t0, Register:i32 $w0, t10 +; CHECK-NEXT: t19: i32,i32 = AArch64ISD::SUBS t2, t4 +; CHECK-NEXT: t13: ch = AArch64ISD::RET_GLUE t12, Register:i32 $w0, t12:1 + +define i32 @combine_subs_multiple_sub_uses(i32 %a, i32 %b) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + %add = add i32 %sel, %sub + ret i32 %add +} + +; CHECK-LABEL: Legalized selection DAG: %bb.0 'do_not_combine_subs_multiple_flag_uses:' +; CHECK-NEXT: SelectionDAG has 19 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t24: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t21:1 +; CHECK-NEXT: t6: i32,ch = CopyFromReg t0, Register:i32 %2 +; CHECK-NEXT: t8: i32,ch = CopyFromReg t0, Register:i32 %3 +; CHECK-NEXT: t23: i32 = AArch64ISD::CSEL t6, t8, Constant:i32<1>, t21:1 +; CHECK-NEXT: t15: i32 = add t24, t23 +; CHECK-NEXT: t17: ch,glue = CopyToReg t0, Register:i32 $w0, t15 +; CHECK-NEXT: t9: i32 = sub t2, t4 +; CHECK-NEXT: t21: i32,i32 = AArch64ISD::SUBS t9, Constant:i32<0> +; 
CHECK-NEXT: t18: ch = AArch64ISD::RET_GLUE t17, Register:i32 $w0, t17:1 + +; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'do_not_combine_subs_multiple_flag_uses:' +; CHECK-NEXT: SelectionDAG has 19 nodes: +; CHECK-NEXT: t0: ch,glue = EntryToken +; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0 +; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1 +; CHECK-NEXT: t24: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t21:1 +; CHECK-NEXT: t6: i32,ch = CopyFromReg t0, Register:i32 %2 +; CHECK-NEXT: t8: i32,ch = CopyFromReg t0, Register:i32 %3 +; CHECK-NEXT: t23: i32 = AArch64ISD::CSEL t6, t8, Constant:i32<1>, t21:1 +; CHECK-NEXT: t15: i32 = add t24, t23 +; CHECK-NEXT: t17: ch,glue = CopyToReg t0, Register:i32 $w0, t15 +; CHECK-NEXT: t9: i32 = sub t2, t4 +; CHECK-NEXT: t21: i32,i32 = AArch64ISD::SUBS t9, Constant:i32<0> +; CHECK-NEXT: t18: ch = AArch64ISD::RET_GLUE t17, Register:i32 $w0, t17:1 + +define i32 @do_not_combine_subs_multiple_flag_uses(i32 %a, i32 %b, i32 %c, i32 %d) { + %sub = sub i32 %a, %b + %cc = icmp ne i32 %sub, 0 + %sel = select i1 %cc, i32 %a, i32 %b + %other = select i1 %cc, i32 %c, i32 %d + %add = add i32 %sel, %other + ret i32 %add +} diff --git a/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll b/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll index d1e0729..6a91d85 100644 --- a/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll +++ b/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll @@ -11,10 +11,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @cvtn_f16_tuple(i64 %stride, p ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1] @@ -52,10 +52,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @cvtnt_f32_tuple(i64 %stride, ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * 
VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: mov z1.d, z0.d diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir index aed3145..e970d83 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir @@ -9,16 +9,16 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill - ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG + ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-1 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: // implicit-def: $z8 ; CHECK-NEXT: // implicit-def: $p4 ; CHECK-NEXT: addvl sp, sp, #1 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir index 17b1ad2..03a6aab 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir @@ -64,7 +64,7 @@ # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 32 @@ -79,7 +79,8 @@ # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 16 * VG # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 # ASM: .cfi_def_cfa_offset 0 @@ -88,8 +89,8 @@ # # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +32 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, 
DW_OP_lit16, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 @@ -129,7 +130,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 48 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # # CHECK-NEXT: $x20 = IMPLICIT_DEF @@ -152,7 +153,8 @@ body: | # ASM-NEXT: .cfi_offset w21, -16 # ASM-NEXT: .cfi_offset w29, -32 # ASM: .cfi_def_cfa_offset 48 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 16 * VG # # ASM: .cfi_def_cfa wsp, 48 # ASM: .cfi_def_cfa_offset 32 @@ -166,9 +168,8 @@ body: | # UNWINDINFO: DW_CFA_offset: reg20 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg21 -16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 -# UNWINDINFO: DW_CFA_def_cfa_offset: +48 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# +# UNWINDINFO: DW_CFA_def_cfa_offset: +48 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit16, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +48 # UNWINDINFO: DW_CFA_def_cfa_offset: +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 @@ -272,7 +273,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 16 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 2 @@ -295,7 +296,8 @@ body: | # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 24 * VG # # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 @@ -305,7 +307,7 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 # UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 @@ -434,7 +436,7 @@ body: | # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $[[TMP:x[0-9]+]] = ADDVL_XXI $sp, 1 # CHECK-NEXT: $x0 = LDRXui killed $[[TMP]], 4 @@ -451,7 +453,8 @@ body: | # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 # ASM: .cfi_def_cfa_offset 32 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 
0x1e, 0x22 // sp + 32 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 8 * VG # # ASM: .cfi_def_cfa wsp, 32 # ASM: .cfi_def_cfa_offset 16 @@ -461,7 +464,7 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 # UNWINDINFO: DW_CFA_def_cfa_offset: +32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 @@ -504,23 +507,23 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP2:x[0-9]+]] = ADDVL_XXI $sp, 1 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP2]], 255 @@ -529,21 +532,21 @@ body: | # CHECK-NEXT: STR_PXI $p0, killed $[[TMP2]], 255 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 
0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 9 # CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 # CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 16 @@ -554,48 +557,65 @@ body: | # ASM-LABEL: test_address_sve_out_of_range: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 512 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2056 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 256 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 512 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 768 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1024 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1280 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1536 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1792 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 2048 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 2056 * VG # -# ASM: .cfi_escape 
0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1808 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1560 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1312 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 1064 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 816 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 568 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 320 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 72 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM: .cfi_def_cfa_offset 0 # ASM-NEXT: .cfi_restore w29 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +256, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +512, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +768, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1024, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1280, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1536, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1792, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2048, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2056, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +256, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +512, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +768, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1024, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 
+16, DW_OP_bregx 0x2e +0, DW_OP_consts +1280, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1536, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1792, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +2048, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +2056, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1808, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1560, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1312, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1064, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +816, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +568, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +320, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +72, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1808, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1560, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1312, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1064, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +816, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +568, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +320, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +72, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 # UNWINDINFO-NEXT: DW_CFA_restore: reg29 @@ -702,15 +722,15 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: frame-setup STR_PXI killed $p6, $sp, 5 # CHECK: frame-setup STR_PXI killed $p5, $sp, 6 # CHECK: frame-setup STR_PXI killed $p4, $sp, 7 # CHECK: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 
0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $p6 = frame-destroy LDR_PXI $sp, 5 # CHECK: $p5 = frame-destroy LDR_PXI $sp, 6 # CHECK: $p4 = frame-destroy LDR_PXI $sp, 7 @@ -725,20 +745,23 @@ body: | # ASM-LABEL: save_restore_pregs_sve: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 8 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 8 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM: .cfi_def_cfa_offset 0 # ASM-NEXT: .cfi_restore w29 # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 # UNWINDINFO-NEXT: DW_CFA_restore: reg29 @@ -761,18 +784,18 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0 # CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1 # CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: 
frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 # CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 @@ -789,13 +812,19 @@ body: | # ASM-LABEL: save_restore_zregs_sve: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 16 +# ASM: .cfi_escape +# ASM-SAME: // sp + 48 + 24 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG # ASM: .cfi_def_cfa wsp, 16 # ASM-NEXT: .cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -805,13 +834,13 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -848,7 +877,7 @@ body: | # CHECK-NEXT: frame-setup 
CFI_INSTRUCTION offset $w29, -32 # CHECK: $sp = frame-setup ADDVL_XXI $sp, -18 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: frame-setup STR_PXI killed $p15, $sp, 4 # CHECK: frame-setup STR_PXI killed $p14, $sp, 5 # CHECK: frame-setup STR_PXI killed $p5, $sp, 14 @@ -857,23 +886,23 @@ body: | # CHECK: frame-setup STR_ZXI killed $z22, $sp, 3 # CHECK: frame-setup STR_ZXI killed $z9, $sp, 16 # CHECK: frame-setup STR_ZXI killed $z8, $sp, 17 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK: $z23 = frame-destroy LDR_ZXI $sp, 2 # CHECK: $z22 = frame-destroy LDR_ZXI $sp, 3 # CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16 @@ -909,20 +938,33 @@ body: | # ASM-NEXT: .cfi_offset w20, -16 # ASM-NEXT: .cfi_offset w21, -24 # ASM-NEXT: .cfi_offset w29, -32 -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 
0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG -# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 144 * VG -# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d11 @ cfa - 32 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d12 @ cfa - 40 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d13 @ cfa - 48 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d14 @ cfa - 56 * VG - 32 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d15 @ cfa - 64 * VG - 32 +# ASM: .cfi_escape +# ASM-SAME: // sp + 64 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 64 + 152 * VG # -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 152 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 32 + 144 * VG # ASM: .cfi_def_cfa wsp, 32 # ASM-NEXT: .cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -943,20 +985,20 @@ body: | # UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16 # UNWINDINFO-NEXT: DW_CFA_offset: reg21 -24 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -40, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: 
DW_CFA_expression: reg79 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_bregx 0x2e +0, DW_OP_consts -40, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_bregx 0x2e +0, DW_OP_consts -48, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_bregx 0x2e +0, DW_OP_consts -56, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_bregx 0x2e +0, DW_OP_consts -64, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +64, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +64, DW_OP_bregx 0x2e +0, DW_OP_consts +152, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +152, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +32 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -1025,14 +1067,14 @@ body: | # CHECK-NEXT: STR_ZXI killed $z22, $sp, 3 # CHECK: STR_ZXI killed $z9, $sp, 16 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 17 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 
0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: $[[TMP]] = frame-setup ADDVL_XXI $[[TMP]], -1 # CHECK-NEXT: $sp = frame-setup ANDXri killed $[[TMP]] @@ -1067,14 +1109,22 @@ body: | # ASM: .cfi_def_cfa w29, 16 # ASM-NEXT: .cfi_offset w30, -8 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d9 @ cfa - 16 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d10 @ cfa - 24 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d11 @ cfa - 32 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d12 @ cfa - 40 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d13 @ cfa - 48 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d14 @ cfa - 56 * VG - 16 +# ASM-NEXT: .cfi_escape +# ASM-SAME: // $d15 @ cfa - 64 * VG - 16 # # ASM: .cfi_restore z8 # ASM-NEXT: .cfi_restore z9 @@ -1093,14 +1143,14 @@ body: | # UNWINDINFO: DW_CFA_def_cfa: reg29 +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -40, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -16, DW_OP_plus, DW_OP_consts 
-56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_bregx 0x2e +0, DW_OP_consts -40, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_bregx 0x2e +0, DW_OP_consts -48, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_bregx 0x2e +0, DW_OP_consts -56, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_bregx 0x2e +0, DW_OP_consts -64, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus # # UNWINDINFO: DW_CFA_restore_extended: reg104 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 @@ -1188,17 +1238,17 @@ body: | # CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 # CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: STR_PXI killed $p15, $sp, 6 # CHECK-NEXT: STR_PXI killed $p4, $sp, 7 # CHECK-NEXT: STR_ZXI killed $z23, $sp, 1 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 2 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -7 -# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 7 -# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape # CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1 # CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 # CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6 @@ -1214,11 +1264,15 @@ body: | # ASM-LABEL: frame_layout: # ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG -# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 80 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // $d8 @ cfa - 8 * VG - 16 +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 80 * VG # -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape +# ASM-SAME: // sp + 16 + 24 * VG # ASM: .cfi_def_cfa wsp, 
16 # ASM-NEXT: .cfi_restore z8 # ASM: .cfi_def_cfa_offset 0 @@ -1226,11 +1280,11 @@ body: | # UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +80, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +80, DW_OP_mul, DW_OP_plus # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa: reg31 +16 # UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 # UNWINDINFO: DW_CFA_def_cfa_offset: +0 diff --git a/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll b/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll index 2cf8621..474a9d1 100644 --- a/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll +++ b/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll @@ -36,7 +36,7 @@ define <vscale x 16 x i1> @match_nxv16i8_v4i8(<vscale x 16 x i8> %op1, <4 x i8> ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 ; CHECK-NEXT: umov w8, v1.h[1] @@ -241,7 +241,7 @@ define <vscale x 16 x i1> @match_nxv16i8_v32i8(<vscale x 16 x i8> %op1, <32 x i8 ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z3.b, z1.b[1] @@ -463,7 +463,7 @@ define <vscale x 4 x i1> @match_nxv4xi32_v4i32(<vscale x 4 x i32> %op1, <4 x i32 ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z2.s, z1.s[1] diff --git a/llvm/test/CodeGen/AArch64/luti-with-sme2.ll b/llvm/test/CodeGen/AArch64/luti-with-sme2.ll index 2d30167..59e1cba 100644 --- a/llvm/test/CodeGen/AArch64/luti-with-sme2.ll +++ b/llvm/test/CodeGen/AArch64/luti-with-sme2.ll @@ -9,10 +9,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_luti4_lane_i16_x2_tuple( ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -50,10 +50,10 @@ define { <vscale x 8 x half>, <vscale x 8 x half> } @test_luti4_lane_f16_x2_tupl ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -91,10 +91,10 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @test_luti4_lane_bf16_x2 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 
0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/midpoint-int.ll b/llvm/test/CodeGen/AArch64/midpoint-int.ll index 15c1dff..79bba53 100644 --- a/llvm/test/CodeGen/AArch64/midpoint-int.ll +++ b/llvm/test/CodeGen/AArch64/midpoint-int.ll @@ -255,12 +255,11 @@ define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_signed_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxth w9, w1 -; CHECK-NEXT: sxth w10, w0 +; CHECK-NEXT: sxth w9, w0 ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, sxth ; CHECK-NEXT: cneg w8, w8, le +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -278,12 +277,11 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind { define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_unsigned_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: and w9, w1, #0xffff -; CHECK-NEXT: and w10, w0, #0xffff +; CHECK-NEXT: and w9, w0, #0xffff ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, uxth ; CHECK-NEXT: cneg w8, w8, ls +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -303,14 +301,13 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind { define i16 @scalar_i16_signed_mem_reg(ptr %a1_addr, i16 %a2) nounwind { ; CHECK-LABEL: scalar_i16_signed_mem_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxth w9, w1 -; CHECK-NEXT: ldrsh w10, [x0] +; CHECK-NEXT: ldrsh w9, [x0] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w10, w9, w1, sxth ; CHECK-NEXT: cneg w8, w8, le -; CHECK-NEXT: lsr w9, w9, #1 -; CHECK-NEXT: madd w0, w9, w8, w10 +; CHECK-NEXT: cneg w10, w10, mi +; CHECK-NEXT: lsr w10, w10, #1 +; CHECK-NEXT: madd w0, w10, w8, w9 ; CHECK-NEXT: ret %a1 = load i16, ptr %a1_addr %t3 = icmp sgt i16 %a1, %a2 ; signed @@ -382,12 +379,11 @@ define i16 @scalar_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_signed_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxtb w9, w1 -; CHECK-NEXT: sxtb w10, w0 +; CHECK-NEXT: sxtb w9, w0 ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, sxtb ; CHECK-NEXT: cneg w8, w8, le +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -405,12 +401,11 @@ define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind { define i8 
@scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_unsigned_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: and w9, w1, #0xff -; CHECK-NEXT: and w10, w0, #0xff +; CHECK-NEXT: and w9, w0, #0xff ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w9, w9, w1, uxtb ; CHECK-NEXT: cneg w8, w8, ls +; CHECK-NEXT: cneg w9, w9, mi ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -430,14 +425,13 @@ define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind { define i8 @scalar_i8_signed_mem_reg(ptr %a1_addr, i8 %a2) nounwind { ; CHECK-LABEL: scalar_i8_signed_mem_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sxtb w9, w1 -; CHECK-NEXT: ldrsb w10, [x0] +; CHECK-NEXT: ldrsb w9, [x0] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: subs w9, w10, w9 -; CHECK-NEXT: cneg w9, w9, mi +; CHECK-NEXT: subs w10, w9, w1, sxtb ; CHECK-NEXT: cneg w8, w8, le -; CHECK-NEXT: lsr w9, w9, #1 -; CHECK-NEXT: madd w0, w9, w8, w10 +; CHECK-NEXT: cneg w10, w10, mi +; CHECK-NEXT: lsr w10, w10, #1 +; CHECK-NEXT: madd w0, w10, w8, w9 ; CHECK-NEXT: ret %a1 = load i8, ptr %a1_addr %t3 = icmp sgt i8 %a1, %a2 ; signed diff --git a/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll b/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll index 7b55c69..1ceb25b 100644 --- a/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll +++ b/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll @@ -13,10 +13,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @tbl2_b_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: ld1b { z3.b, z11.b }, pn8/z, [x1] ; CHECK-NEXT: ld1b { z4.b, z12.b }, pn8/z, [x1, x0] @@ -53,10 +53,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @tbl2_h_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 
0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -94,10 +94,10 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32> } @tbl2_s_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x1] @@ -135,10 +135,10 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64> } @tbl2_d_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1d { z3.d, z11.d }, pn8/z, [x1] @@ -176,10 +176,10 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @tbl2_bf16_tuple(i64 %st ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 
0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1] @@ -217,10 +217,10 @@ define { <vscale x 4 x float>, <vscale x 4 x float> } @tbl2_f32_tuple(i64 %strid ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x1] @@ -258,10 +258,10 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @tbl2_f64_tuple(i64 %str ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1d { z3.d, z11.d }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll index 0853325..6fcfc5b 100644 --- a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll +++ b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll @@ -328,7 +328,7 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; CHECK-NEXT: .cfi_offset w30, -24 ; CHECK-NEXT: .cfi_offset w29, -32 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 32 + 144 * VG ; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill @@ -351,16 +351,16 @@ define 
void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d8 @ cfa - 8 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d9 @ cfa - 16 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d10 @ cfa - 24 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d11 @ cfa - 32 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d12 @ cfa - 40 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d13 @ cfa - 48 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d14 @ cfa - 56 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d15 @ cfa - 64 * VG - 32 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x01, 0x1e, 0x22 // sp + 32 + 152 * VG ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -371,7 +371,7 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; CHECK-NEXT: smstart sm ; CHECK-NEXT: .cfi_restore vg ; CHECK-NEXT: addvl sp, sp, #1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 32 + 144 * VG ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload @@ -448,14 +448,14 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 { ; FP-CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; FP-CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded 
Spill ; FP-CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; FP-CHECK-NEXT: addvl sp, sp, #-1 ; FP-CHECK-NEXT: str z0, [x29, #-19, mul vl] // 16-byte Folded Spill ; FP-CHECK-NEXT: //APP diff --git a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll index b0390ec..8398e07 100644 --- a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll +++ b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll @@ -36,7 +36,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 @@ -129,10 +129,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @bfcvt_tuple(i64 %stride, ptr ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 
16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll index b4a83c1..58d2e25 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll @@ -58,7 +58,7 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll index 0bc9e15..3bb516d 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll @@ -24,10 +24,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vector_sat_shift_narrow ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue pn8.b ; CHECK-NEXT: add x8, x1, x0 ; CHECK-NEXT: ld1w { z2.s, z10.s }, pn8/z, [x1] @@ -98,7 +98,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill -; 
CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x8, x0, #1 ; CHECK-NEXT: add x9, x1, x0 diff --git a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir index 1d04cc6..c3338b1 100644 --- a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir +++ b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir @@ -17,7 +17,7 @@ body: | ; CHECK-NEXT: stp d9, d8, [sp, #16] ; CHECK-NEXT: str x29, [sp, #32] ; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 48 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: .cfi_offset b8, -24 ; CHECK-NEXT: .cfi_offset b9, -32 @@ -97,7 +97,7 @@ body: | ; CHECK: str x29, [sp, #-16]! ; CHECK-NEXT: addvl sp, sp, #-2 - ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG + ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: lsl x9, x1, #1 ; CHECK-NEXT: ptrue pn8.b diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll index 555e38a..109059e 100644 --- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll +++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll @@ -16,7 +16,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> % ; CHECK-LEGALIZATION-NEXT: .cfi_def_cfa_offset 16 ; CHECK-LEGALIZATION-NEXT: .cfi_offset w29, -16 ; CHECK-LEGALIZATION-NEXT: addvl sp, sp, #-3 -; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-LEGALIZATION-NEXT: cntd x8 ; CHECK-LEGALIZATION-NEXT: ptrue p0.d, vl2 ; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2 @@ -59,7 +59,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> % ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: mov w9, #2 // =0x2 @@ -111,7 +111,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x ; CHECK-LEGALIZATION-NEXT: .cfi_def_cfa_offset 16 ; CHECK-LEGALIZATION-NEXT: .cfi_offset w29, -16 ; CHECK-LEGALIZATION-NEXT: addvl sp, sp, #-3 -; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-LEGALIZATION-NEXT: cntd x8 ; CHECK-LEGALIZATION-NEXT: ptrue p0.d, vl2 ; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2 @@ -154,7 
+154,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: ptrue p0.d, vl2 ; CHECK-NEXT: mov w9, #2 // =0x2 diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll index 3a33405..4615b1a 100644 --- a/llvm/test/CodeGen/AArch64/stack-hazard.ll +++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll @@ -388,7 +388,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK0-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill ; CHECK0-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -8 ; CHECK0-NEXT: .cfi_offset b8, -16 ; CHECK0-NEXT: mov z0.s, #0 // =0x0 @@ -407,7 +407,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK64-NEXT: str x29, [sp, #72] // 8-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: addvl sp, sp, #-1 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 144 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -8 ; CHECK64-NEXT: .cfi_offset b8, -80 ; CHECK64-NEXT: mov z0.s, #0 // =0x0 @@ -429,7 +429,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" { ; CHECK1024-NEXT: str x29, [sp, #1032] // 8-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: addvl sp, sp, #-1 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2064 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -8 ; CHECK1024-NEXT: .cfi_offset b8, -1040 ; CHECK1024-NEXT: mov z0.s, #0 // =0x0 @@ -955,9 +955,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK0-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: mov w0, wzr @@ -973,9 +973,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 144 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -993,9 +993,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2064 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1017,10 +1017,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK0-NEXT: addvl sp, sp, #-2 ; CHECK0-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: 
//NO_APP ; CHECK0-NEXT: mov w0, wzr @@ -1038,10 +1038,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK64-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 16 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 144 + 16 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 80 - 16 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -1061,10 +1061,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps ; CHECK1024-NEXT: str z9, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1040 - 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1086,9 +1086,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK0-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: addvl x8, sp, #1 @@ -1106,9 +1106,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #80 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 160 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -1127,9 +1127,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1040 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2080 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -1153,9 +1153,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK0-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: mov x8, x0 @@ -1174,9 +1174,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK64-NEXT: addvl sp, sp, #-1 ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #80 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 160 + 8 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP @@ -1196,9 +1196,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat ; CHECK1024-NEXT: addvl sp, sp, #-1 ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1040 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2080 + 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP @@ -1224,9 +1224,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK0-NEXT: addvl sp, sp, #-1 ; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK0-NEXT: addvl sp, sp, #-1 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK0-NEXT: .cfi_offset w29, -16 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK0-NEXT: mov z0.s, #0 // =0x0 ; CHECK0-NEXT: mov w0, wzr ; CHECK0-NEXT: //APP @@ -1246,9 +1246,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: addvl sp, sp, #-1 -; CHECK64-NEXT: 
.cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 16 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 144 + 16 * VG ; CHECK64-NEXT: .cfi_offset w29, -16 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80 ; CHECK64-NEXT: mov z0.s, #0 // =0x0 ; CHECK64-NEXT: add x8, sp, #64 ; CHECK64-NEXT: mov w0, wzr @@ -1271,9 +1271,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p ; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: addvl sp, sp, #-1 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 16 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG ; CHECK1024-NEXT: .cfi_offset w29, -16 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040 ; CHECK1024-NEXT: mov z0.s, #0 // =0x0 ; CHECK1024-NEXT: add x8, sp, #1024 ; CHECK1024-NEXT: mov w0, wzr @@ -1311,7 +1311,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK0-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: sub sp, sp, #16 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 80 + 64 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 80 + 64 * VG ; CHECK0-NEXT: .cfi_offset w19, -8 ; CHECK0-NEXT: .cfi_offset w20, -16 ; CHECK0-NEXT: .cfi_offset w21, -24 @@ -1320,14 +1320,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK0-NEXT: .cfi_offset w24, -48 ; CHECK0-NEXT: .cfi_offset w25, -56 ; CHECK0-NEXT: .cfi_offset w29, -64 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 
0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: mov x8, x0 ; CHECK0-NEXT: mov w0, wzr ; CHECK0-NEXT: //APP @@ -1368,7 +1368,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK64-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: sub sp, sp, #96 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x01, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 224 + 64 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 224 + 64 * VG ; CHECK64-NEXT: .cfi_offset w19, -8 ; CHECK64-NEXT: .cfi_offset w20, -16 ; CHECK64-NEXT: .cfi_offset w21, -24 @@ -1377,14 +1377,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK64-NEXT: .cfi_offset w24, -48 ; CHECK64-NEXT: .cfi_offset w25, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 
0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: mov w0, wzr ; CHECK64-NEXT: //APP @@ -1431,7 +1431,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK1024-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: sub sp, sp, #1056 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2144 + 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 2144 + 64 * VG ; CHECK1024-NEXT: .cfi_offset w19, -8 ; CHECK1024-NEXT: .cfi_offset w20, -16 ; CHECK1024-NEXT: .cfi_offset w21, -24 @@ -1440,14 +1440,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3 ; CHECK1024-NEXT: .cfi_offset w24, -48 ; CHECK1024-NEXT: .cfi_offset w25, -56 ; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 
0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: mov w0, wzr ; CHECK1024-NEXT: //APP @@ -1869,7 +1869,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK0-NEXT: .cfi_offset w30, -40 ; CHECK0-NEXT: .cfi_offset w29, -48 ; CHECK0-NEXT: addvl sp, sp, #-18 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -1898,14 +1898,14 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ 
cfa - 64 * VG - 48 ; CHECK0-NEXT: mov x8, x0 ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP @@ -1990,7 +1990,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: .cfi_offset w30, -40 ; CHECK64-NEXT: .cfi_offset w29, -48 ; CHECK64-NEXT: addvl sp, sp, #-18 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2019,16 +2019,16 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 112 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 112 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 112 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 112 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 112 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 112 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 112 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 112 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 112 ; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x01, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 176 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 
0x0b, 0x8f, 0xb0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 176 + 144 * VG ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP @@ -2051,7 +2051,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: .cfi_restore vg ; CHECK64-NEXT: add sp, sp, #64 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2119,7 +2119,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: .cfi_offset w30, -40 ; CHECK1024-NEXT: .cfi_offset w29, -48 ; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2148,16 +2148,16 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1072 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1072 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1072 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1072 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1072 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1072 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1072 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1072 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xd0, 
0x77, 0x22 // $d11 @ cfa - 32 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1072 ; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2096 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 2096 + 144 * VG ; CHECK1024-NEXT: mov x8, x0 ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP @@ -2180,7 +2180,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK1024-NEXT: movk w0, #59491, lsl #16 ; CHECK1024-NEXT: .cfi_restore vg ; CHECK1024-NEXT: add sp, sp, #1024 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2252,7 +2252,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: .cfi_offset w30, -40 ; CHECK0-NEXT: .cfi_offset w29, -48 ; CHECK0-NEXT: addvl sp, sp, #-18 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2281,16 +2281,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 
0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; CHECK0-NEXT: sub sp, sp, #48 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 96 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 96 + 144 * VG ; CHECK0-NEXT: //APP ; CHECK0-NEXT: //NO_APP ; CHECK0-NEXT: bl __arm_sme_state @@ -2312,7 +2312,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK0-NEXT: movk w0, #59491, lsl #16 ; CHECK0-NEXT: .cfi_restore vg ; CHECK0-NEXT: add sp, sp, #48 -; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK0-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK0-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK0-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2376,7 +2376,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: .cfi_offset w30, -40 ; CHECK64-NEXT: .cfi_offset w29, -48 ; CHECK64-NEXT: addvl sp, sp, #-18 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2405,16 +2405,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 112 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 
0x22 // $d9 @ cfa - 112 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 112 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 112 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 112 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 112 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 112 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 112 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 112 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 112 ; CHECK64-NEXT: sub sp, sp, #112 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x01, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 224 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 224 + 144 * VG ; CHECK64-NEXT: //APP ; CHECK64-NEXT: //NO_APP ; CHECK64-NEXT: bl __arm_sme_state @@ -2436,7 +2436,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: .cfi_restore vg ; CHECK64-NEXT: add sp, sp, #112 -; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG +; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG ; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -2504,7 +2504,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: .cfi_offset w30, -40 ; CHECK1024-NEXT: .cfi_offset w29, -48 ; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 
0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2533,16 +2533,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1072 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1072 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1072 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1072 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1072 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1072 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1072 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1072 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1072 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1072 ; CHECK1024-NEXT: sub sp, sp, #1072 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2144 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 2144 + 144 * VG ; CHECK1024-NEXT: //APP ; CHECK1024-NEXT: //NO_APP ; CHECK1024-NEXT: bl __arm_sme_state @@ -2564,7 +2564,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK1024-NEXT: movk w0, #59491, lsl #16 ; CHECK1024-NEXT: .cfi_restore vg ; CHECK1024-NEXT: add sp, sp, #1072 -; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 
0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG +; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG ; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -3192,14 +3192,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: mov w9, w0 ; CHECK0-NEXT: mov x8, sp ; CHECK0-NEXT: mov w2, w1 @@ -3327,14 +3327,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 
0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: mov w9, w0 ; CHECK64-NEXT: mov x8, sp @@ -3469,14 +3469,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 
0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: sub sp, sp, #1024 ; CHECK1024-NEXT: mov w9, w0 ; CHECK1024-NEXT: mov x8, sp @@ -3616,14 +3616,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 
0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: sub x9, sp, #1024 ; CHECK0-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK0-NEXT: mov w2, w1 @@ -3743,14 +3743,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: sub x9, sp, #1088 ; CHECK64-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK64-NEXT: mov w2, w1 @@ -3875,14 +3875,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i ; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 
0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: sub x9, sp, #2048 ; CHECK1024-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK1024-NEXT: mov w2, w1 @@ -4016,14 +4016,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK0-NEXT: .cfi_offset w28, -48 ; CHECK0-NEXT: .cfi_offset w30, -56 ; CHECK0-NEXT: .cfi_offset w29, -64 -; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG -; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG +; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 
0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64 +; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64 ; CHECK0-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK0-NEXT: ubfiz x8, x0, #2, #32 ; CHECK0-NEXT: mov x9, sp @@ -4125,14 +4125,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK64-NEXT: .cfi_offset w28, -48 ; CHECK64-NEXT: .cfi_offset w30, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128 ; CHECK64-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK64-NEXT: ubfiz x8, x0, #2, #32 ; 
CHECK64-NEXT: mov x9, sp @@ -4240,14 +4240,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 % ; CHECK1024-NEXT: .cfi_offset w28, -48 ; CHECK1024-NEXT: .cfi_offset w30, -56 ; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG +; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088 +; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088 ; CHECK1024-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK1024-NEXT: ubfiz x8, x0, #2, #32 ; CHECK1024-NEXT: mov x9, sp diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll index 56d865e..59b95be 100644 --- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll +++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll @@ -18,7 +18,7 @@ define void @sve_1_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -38,7 +38,7 @@ define void @sve_4_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; 
CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -63,7 +63,7 @@ define void @sve_16_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-16 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 128 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x01, 0x1e, 0x22 // sp + 16 + 128 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 @@ -103,7 +103,7 @@ define void @sve_17_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-17 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 136 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x01, 0x1e, 0x22 // $x9 + 16 + 136 * VG ; CHECK-NEXT: .LBB3_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -155,9 +155,9 @@ define void @sve_1v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload @@ -180,15 +180,15 @@ define void @sve_4v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: str z11, [sp] // 16-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG +; CHECK-NEXT: .cfi_escape 0x10, 
0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z11, [sp] // 16-byte Folded Reload @@ -217,7 +217,7 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-16 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 128 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x01, 0x1e, 0x22 // sp + 16 + 128 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill ; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill @@ -235,14 +235,14 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr 
z23, [sp] // 16-byte Folded Reload @@ -287,7 +287,7 @@ define void @sve_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -310,7 +310,7 @@ define void @sve_4p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p11, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p10, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p9, [sp, #6, mul vl] // 2-byte Folded Spill @@ -339,7 +339,7 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-17 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 136 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x01, 0x1e, 0x22 // $x9 + 16 + 136 * VG ; CHECK-NEXT: .LBB9_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -370,14 +370,14 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 { ; CHECK-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa 
- 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload @@ -426,7 +426,7 @@ define void @sve_1_vector_16_arr(ptr %out) #0 { ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: .cfi_def_cfa wsp, 32 ; CHECK-NEXT: add sp, sp, #16 @@ -453,9 +453,9 @@ define void @sve_1_vector_4096_arr(ptr %out) #0 { ; CHECK-NEXT: sub x9, sp, #3, lsl #12 // =12288 ; CHECK-NEXT: .cfi_def_cfa w9, 12304 ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x79, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 12304 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x79, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // $x9 + 12304 + 256 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x79, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 12304 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x79, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // $x9 + 12304 + 512 * VG ; CHECK-NEXT: .LBB11_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096 @@ -470,9 +470,9 @@ define void @sve_1_vector_4096_arr(ptr %out) #0 { ; CHECK-NEXT: ldr xzr, [sp] ; CHECK-NEXT: .cfi_def_cfa_register wsp ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x8f, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x88, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 12304 + 264 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x02, 0x1e, 0x22 // sp + 12304 + 264 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 12304 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 12304 + 16 * VG ; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: .cfi_def_cfa wsp, 12304 ; CHECK-NEXT: add sp, sp, #3, lsl #12 // =12288 @@ -538,38 +538,38 @@ define void @sve_1024_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // sp + 16 + 256 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 
16 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // sp + 16 + 512 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x06, 0x1e, 0x22 // sp + 16 + 768 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x08, 0x1e, 0x22 // sp + 16 + 1024 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0a, 0x1e, 0x22 // sp + 16 + 1280 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0c, 0x1e, 0x22 // sp + 16 + 1536 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0e, 0x1e, 0x22 // sp + 16 + 1792 * VG ; CHECK-NEXT: addvl sp, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x10, 0x1e, 0x22 // sp + 16 + 2048 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1800 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x0e, 0x1e, 0x22 // sp + 16 + 1800 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1552 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x0c, 0x1e, 0x22 // sp + 16 + 1552 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1304 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x0a, 0x1e, 0x22 // sp + 16 + 1304 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1056 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x08, 0x1e, 0x22 // sp + 16 + 1056 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 808 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x06, 0x1e, 0x22 // sp + 16 + 808 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 560 * VG +; 
CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x04, 0x1e, 0x22 // sp + 16 + 560 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 312 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x02, 0x1e, 0x22 // sp + 16 + 312 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: addvl sp, sp, #8 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -588,23 +588,23 @@ define void @sve_1028_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl x9, sp, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 256 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // $x9 + 16 + 256 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 512 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // $x9 + 16 + 512 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 768 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x06, 0x1e, 0x22 // $x9 + 16 + 768 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1024 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x08, 0x1e, 0x22 // $x9 + 16 + 1024 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1280 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0a, 0x1e, 0x22 // $x9 + 16 + 1280 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1536 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0c, 0x1e, 0x22 // $x9 + 16 + 1536 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1792 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0e, 0x1e, 0x22 // $x9 + 16 + 1792 * VG ; CHECK-NEXT: addvl x9, x9, #-32 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 2048 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x10, 0x1e, 0x22 // $x9 + 16 + 2048 * VG ; CHECK-NEXT: addvl x9, x9, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 2056 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 
0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x10, 0x1e, 0x22 // $x9 + 16 + 2056 * VG ; CHECK-NEXT: .LBB14_1: // %entry ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: sub sp, sp, #16, lsl #12 // =65536 @@ -619,21 +619,21 @@ define void @sve_1028_64k_guard(ptr %out) #0 "stack-probe-size"="65536" { ; CHECK-NEXT: ldr xzr, [sp] ; CHECK-NEXT: .cfi_def_cfa_register wsp ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x0e, 0x1e, 0x22 // sp + 16 + 1808 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x0c, 0x1e, 0x22 // sp + 16 + 1560 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x0a, 0x1e, 0x22 // sp + 16 + 1312 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x08, 0x1e, 0x22 // sp + 16 + 1064 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x06, 0x1e, 0x22 // sp + 16 + 816 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x04, 0x1e, 0x22 // sp + 16 + 568 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x02, 0x1e, 0x22 // sp + 16 + 320 * VG ; CHECK-NEXT: addvl sp, sp, #31 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG ; CHECK-NEXT: addvl sp, sp, #9 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -656,7 +656,7 @@ define void @sve_5_vector(ptr %out) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-5 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 40 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x28, 0x1e, 0x22 // sp + 16 + 40 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: addvl sp, sp, #5 ; CHECK-NEXT: .cfi_def_cfa wsp, 16 @@ -682,21 +682,21 @@ define void @sve_unprobed_area(<vscale x 4 x float> %a, i32 %n) #0 { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 
0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: str xzr, [sp] ; CHECK-NEXT: str p9, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP ; CHECK-NEXT: addvl sp, sp, #4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll b/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll index 0960133..bd41101 100644 --- a/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll +++ b/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll @@ -1,8 +1,10 @@ -; RUN: llc -mtriple=arm64ec-unknown-windows-gnu < %s | FileCheck %s +; RUN: llc -mtriple=arm64ec-unknown-windows < %s | FileCheck -check-prefixes=CHECK,NONGNU %s +; RUN: llc -mtriple=arm64ec-unknown-windows-gnu < %s | FileCheck -check-prefixes=CHECK,GNU %s ; CHECK-LABEL: func = "#func" ; CHECK: bl "#other" -; CHECK: bl "#__stack_chk_fail" +; NONGNU: bl "#__security_check_cookie_arm64ec" +; GNU: bl "#__stack_chk_fail" define void @func() #0 { entry: %buf = alloca [10 x i8], align 1 diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll index 2520095..8b7fa9e 100644 --- a/llvm/test/CodeGen/AArch64/sve-alloca.ll +++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll @@ -46,14 +46,14 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) { ; CHECK-NEXT: .cfi_offset w28, -16 ; CHECK-NEXT: .cfi_offset w30, -24 ; CHECK-NEXT: .cfi_offset w29, -32 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 
0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d8 @ cfa - 8 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d9 @ cfa - 16 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d10 @ cfa - 24 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d11 @ cfa - 32 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d12 @ cfa - 40 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d13 @ cfa - 48 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d14 @ cfa - 56 * VG - 32 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d15 @ cfa - 64 * VG - 32 ; CHECK-NEXT: rdvl x9, #2 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: add x9, x9, #15 diff --git a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll index 30a8396..254b8e0 100644 --- a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll +++ b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll @@ -43,17 +43,17 @@ define void @fbyte(<vscale x 16 x i8> %v){ ; NOPAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; NOPAIR-NEXT: .cfi_offset w30, -8 ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG 
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; NOPAIR-NEXT: bl my_func ; NOPAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; NOPAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload @@ -113,17 +113,17 @@ define void @fbyte(<vscale x 16 x i8> %v){ ; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; PAIR-NEXT: .cfi_offset w30, -8 ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // 
$d11 @ cfa - 32 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; PAIR-NEXT: bl my_func ; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload @@ -187,17 +187,17 @@ define void @fhalf(<vscale x 8 x half> %v) { ; NOPAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; NOPAIR-NEXT: .cfi_offset w30, -8 ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; NOPAIR-NEXT: bl my_func ; NOPAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; NOPAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload @@ -257,17 +257,17 @@ define void @fhalf(<vscale x 8 x half> %v) { ; 
PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; PAIR-NEXT: .cfi_offset w30, -8 ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; PAIR-NEXT: bl my_func ; PAIR-NEXT: ptrue pn8.b ; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload @@ -310,11 +310,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() { ; NOPAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 
0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload @@ -336,11 +336,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() { ; PAIR-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ptrue pn8.b @@ -368,11 +368,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() { ; NOPAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload @@ -393,11 +393,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() { ; PAIR-NEXT: str p10, [sp, #6, mul 
vl] // 2-byte Folded Spill ; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: st1b { z8.b, z9.b }, pn9, [sp, #2, mul vl] // 32-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ptrue pn9.b @@ -421,10 +421,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() { ; NOPAIR-NEXT: addvl sp, sp, #-2 ; NOPAIR-NEXT: str z9, [sp] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z9, [sp] // 16-byte Folded Reload @@ -440,10 +440,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() { ; PAIR-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill ; PAIR-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload 
@@ -494,10 +494,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_2_z_regs_negative() { ; NOPAIR-NEXT: addvl sp, sp, #-2 ; NOPAIR-NEXT: str z10, [sp] // 16-byte Folded Spill ; NOPAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 -; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 16 * VG +; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 16 * VG - 16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP ; NOPAIR-NEXT: ldr z10, [sp] // 16-byte Folded Reload @@ -512,10 +512,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_2_z_regs_negative() { ; PAIR-NEXT: addvl sp, sp, #-2 ; PAIR-NEXT: str z10, [sp] // 16-byte Folded Spill ; PAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; PAIR-NEXT: .cfi_offset w29, -16 -; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 16 * VG +; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 16 * VG - 16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP ; PAIR-NEXT: ldr z10, [sp] // 16-byte Folded Reload @@ -536,7 +536,7 @@ define aarch64_sve_vector_pcs void @test_clobbers_p_reg_negative() { ; NOPAIR-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; NOPAIR-NEXT: addvl sp, sp, #-1 ; NOPAIR-NEXT: str p10, [sp, #7, mul vl] // 2-byte Folded Spill -; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; NOPAIR-NEXT: .cfi_offset w29, -16 ; NOPAIR-NEXT: //APP ; NOPAIR-NEXT: //NO_APP @@ -550,7 +550,7 @@ define aarch64_sve_vector_pcs void @test_clobbers_p_reg_negative() { ; PAIR-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; PAIR-NEXT: addvl sp, sp, #-1 ; PAIR-NEXT: str p10, [sp, #7, mul vl] // 2-byte Folded Spill -; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; PAIR-NEXT: .cfi_offset w29, -16 ; PAIR-NEXT: //APP ; PAIR-NEXT: //NO_APP diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll index 5e4c891..9066051 100644 --- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll +++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll @@ -438,7 +438,7 @@ define void @non_sve_caller_non_sve_callee_high_range() { ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 @@ -464,7 +464,7 @@ define void @non_sve_caller_high_range_non_sve_callee_high_range(float %f0, floa ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 @@ -523,17 +523,17 @@ define <vscale x 4 x float> @sve_caller_non_sve_callee_high_range(<vscale x 4 x ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 168 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x01, 0x1e, 0x22 // sp + 16 + 168 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 
0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: mov z25.d, z0.d ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: movi d0, #0000000000000000 @@ -621,17 +621,17 @@ define <vscale x 4 x float> @sve_ret_caller_non_sve_callee_high_range() { ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c 
// $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: movi d0, #0000000000000000 ; CHECK-NEXT: fmov s1, #1.00000000 ; CHECK-NEXT: addvl x0, sp, #1 @@ -686,7 +686,7 @@ define void @verify_all_operands_are_initialised() { ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi d0, #0000000000000000 diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll index d02aa06..6c6a691 100644 --- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll @@ -8,7 +8,7 @@ define <4 x i32> @extract_v4i32_nxv16i32_12(<vscale x 16 x i32> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -27,7 +27,7 @@ define <8 x i16> @extract_v8i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z1, [sp, #1, mul vl] ; CHECK-NEXT: str z0, [sp] @@ -44,7 +44,7 @@ define <4 x i16> @extract_v4i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -65,7 +65,7 @@ define <2 x i16> @extract_v2i16_nxv32i16_8(<vscale x 32 x i16> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: str z3, [sp, #3, mul vl] @@ -94,7 +94,7 @@ define <2 x i64> @extract_v2i64_nxv8i64_8(<vscale x 8 x i64> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, #8 // =0x8 @@ -120,7 +120,7 @@ define <4 x float> @extract_v4f32_nxv16f32_12(<vscale x 16 x float> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z3, [sp, #3, mul vl] ; CHECK-NEXT: str z2, [sp, #2, mul vl] @@ -168,7 +168,7 @@ define <4 x i1> @extract_v4i1_nxv32i1_16(<vscale x 32 x i1> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1 ; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1 @@ -224,7 +224,7 @@ define <4 x i3> @extract_v4i3_nxv32i3_16(<vscale x 32 x i3> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: str z1, [sp, #1, mul vl] @@ -271,7 +271,7 @@ define <4 x i64> @extract_v4i64_nxv8i64_0(<vscale x 8 x i64> %arg) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: str z1, [sp, #1, mul vl] ; CHECK-NEXT: str z0, [sp] diff --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll index cbede1b..4aaa25e 100644 --- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll @@ -63,7 +63,7 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) uw ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: punpkhi p2.h, p1.b ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: punpklo p1.h, p1.b diff --git a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll index 4b93900..8750867 100644 --- a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll +++ b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll @@ -49,7 +49,7 @@ define half @fadda_nxv6f16(<vscale x 6 x half> %v, half %s) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov w8, #32768 // =0x8000 ; CHECK-NEXT: ptrue p0.d @@ -73,7 +73,7 @@ define half @fadda_nxv10f16(<vscale x 10 x half> %v, half %s) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-3 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: // kill: def $h2 killed $h2 def $z2 diff --git a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll index 1b6b92a..4374409 100644 --- a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll +++ b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll @@ -254,7 +254,7 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-4476578029606273024 // =0xc1e0000000000000 ; CHECK-NEXT: ptrue p0.d @@ -341,7 +341,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-4548635623644200960 // =0xc0e0000000000000 ; CHECK-NEXT: ptrue p0.d diff --git a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll index b3aefb8..1df2819 100644 --- a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll +++ b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll @@ -208,7 +208,7 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) { ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #281474974613504 // =0xffffffe00000 @@ -275,7 +275,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) { ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #281337537757184 // =0xffe000000000 diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll index 7f558e3..8ca005a 100644 --- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll @@ -588,7 +588,7 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val, ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1 ; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1 diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll index dcf3317..73c783d 100644 --- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll @@ -186,7 +186,7 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, ptr %out) uwt ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: str z0, [sp] ; CHECK-NEXT: str q1, [sp, #32] @@ -229,7 +229,7 @@ define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) uwtable { ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: str q0, [sp, #16] ; CHECK-NEXT: ldr z0, [sp, #1, mul vl] @@ -896,7 +896,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_0(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -923,7 +923,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_1(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -950,7 +950,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_2(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -977,7 +977,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_3(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1004,7 +1004,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_4(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1031,7 +1031,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_5(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1058,7 +1058,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_6(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1085,7 +1085,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_7(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b @@ -1112,7 +1112,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_8(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1139,7 +1139,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_9(<vscale x 16 x i1> %vec, <vsc ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1166,7 +1166,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_10(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1193,7 +1193,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_11(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1220,7 +1220,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_12(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1247,7 +1247,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_13(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1274,7 +1274,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_14(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b @@ -1301,7 +1301,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_15(<vscale x 16 x i1> %vec, <vs ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b diff --git a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir index 6d09425..2a7e8a43c 100644 --- a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir +++ b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir @@ -41,13 +41,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) @@ -64,7 +64,7 @@ body: | ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s64) from %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy 
ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -100,13 +100,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) @@ -123,7 +123,7 @@ body: | ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) ; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s64) from %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -159,44 +159,44 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; 
CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -231,44 +231,44 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, 
-16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1B_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SB_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1H_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SH_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_IMM renamable $p0, killed $x8, -8, implicit 
$ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 + ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 diff --git a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir index 1352b9d..863d4d1 100644 --- a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir +++ b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir @@ -41,13 +41,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, 7 :: (load (s16) from %ir.object) ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, 7 :: (load (s32) from %ir.object, align 8) @@ -56,7 +56,7 @@ body: | ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s16) into %ir.object, align 8) ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s32) into %ir.object, align 8) ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -84,13 +84,13 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 
0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, -8 :: (load (s16) from %ir.object) ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, -8 :: (load (s32) from %ir.object) @@ -99,7 +99,7 @@ body: | ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s16) into %ir.object, align 8) ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s32) into %ir.object, align 8) ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -127,30 +127,30 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_positive_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, 7 :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, killed $x8, 7 :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, killed $x8, 7 :: (load (s32) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1D_ZRI renamable $p0, killed $x8, 7 :: (load (s64) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1B_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s8) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s16) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s32) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; 
CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 @@ -178,30 +178,30 @@ body: | liveins: $p0 ; CHECK-LABEL: name: testcase_negative_offset_out_of_range - ; CHECK: liveins: $p0 + ; CHECK: liveins: $p0, $fp ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 - ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, -8 :: (load (s8) from %ir.object, align 2) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, killed $x8, -8 :: (load (s16) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, killed $x8, -8 :: (load (s32) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: renamable $z0 = LDNT1D_ZRI renamable $p0, killed $x8, -8 :: (load (s64) from %ir.object) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1B_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s8) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s16) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s32) into %ir.object, align 8) - ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 + ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg ; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s64) into %ir.object) - ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4 + ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 ; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 diff --git a/llvm/test/CodeGen/AArch64/sve-llrint.ll b/llvm/test/CodeGen/AArch64/sve-llrint.ll index b0198cf..12d4918 100644 --- a/llvm/test/CodeGen/AArch64/sve-llrint.ll +++ b/llvm/test/CodeGen/AArch64/sve-llrint.ll @@ -88,7 +88,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 
0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z1.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h @@ -161,11 +161,11 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z2.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w8, #64511 // =0xfbff @@ -299,16 +299,16 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 
0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z4.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w9, #64511 // =0xfbff @@ -614,7 +614,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z2.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s @@ -684,11 +684,11 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z4.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: mov w8, #-553648128 // =0xdf000000 @@ -818,16 +818,16 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: 
.cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z24.d, z0.s ; CHECK-NEXT: uunpkhi z25.d, z0.s ; CHECK-NEXT: mov w9, #-553648128 // =0xdf000000 @@ -1125,7 +1125,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 @@ -1190,10 +1190,10 @@ define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) { ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 
0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 ; CHECK-NEXT: mov z26.d, #0x8000000000000000 @@ -1312,16 +1312,16 @@ define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ldr z2, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/sve-lrint.ll b/llvm/test/CodeGen/AArch64/sve-lrint.ll index aa586390..58ac53d 100644 --- a/llvm/test/CodeGen/AArch64/sve-lrint.ll +++ b/llvm/test/CodeGen/AArch64/sve-lrint.ll @@ -89,7 +89,7 @@ define <vscale x 8 x iXLen> @lrint_v8f16(<vscale x 8 x half> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; 
CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z1.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h @@ -162,11 +162,11 @@ define <vscale x 16 x iXLen> @lrint_v16f16(<vscale x 16 x half> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z2.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w8, #64511 // =0xfbff @@ -300,16 +300,16 @@ define <vscale x 32 x iXLen> @lrint_v32f16(<vscale x 32 x half> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 
0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z4.s, z0.h ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: mov w9, #64511 // =0xfbff @@ -615,7 +615,7 @@ define <vscale x 8 x iXLen> @lrint_v8f32(<vscale x 8 x float> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: uunpklo z2.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s @@ -685,11 +685,11 @@ define <vscale x 16 x iXLen> @lrint_v16f32(<vscale x 16 x float> %x) { ; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 ; CHECK-NEXT: uunpklo z4.d, z0.s ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: mov w8, #-553648128 // =0xdf000000 @@ -819,16 +819,16 @@ define <vscale x 32 x iXLen> @lrint_v32f32(<vscale x 32 x float> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, 
-16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: uunpklo z24.d, z0.s ; CHECK-NEXT: uunpkhi z25.d, z0.s ; CHECK-NEXT: mov w9, #-553648128 // =0xdf000000 @@ -1126,7 +1126,7 @@ define <vscale x 8 x iXLen> @lrint_v8f64(<vscale x 8 x double> %x) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 @@ -1191,10 +1191,10 @@ define <vscale x 16 x iXLen> @lrint_v16f64(<vscale x 16 x double> %x) { ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 
0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000 ; CHECK-NEXT: mov z26.d, #0x8000000000000000 @@ -1313,16 +1313,16 @@ define <vscale x 32 x iXLen> @lrint_v32f64(<vscale x 32 x double> %x) { ; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: ldr z0, [x0] ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ldr z2, [x0, #2, mul vl] diff --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll index 6e08606..24df76b 100644 --- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll @@ -53,7 +53,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1> ; CHECK-NEXT: 
.cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill @@ -137,7 +137,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1> ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill diff --git a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll index 9a4231a..0bc8cb8 100644 --- a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll @@ -20,7 +20,7 @@ define i8 @split_extract_32i8_idx(<vscale x 32 x i8> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov w9, w0 @@ -43,7 +43,7 @@ define i16 @split_extract_16i16_idx(<vscale x 16 x i16> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, w0 @@ -66,7 +66,7 @@ define i32 @split_extract_8i32_idx(<vscale x 8 x i32> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, w0 @@ -89,7 +89,7 @@ define i64 @split_extract_8i64_idx(<vscale x 8 x i64> %a, i32 %idx) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, w0 @@ -134,7 +134,7 @@ define i16 @split_extract_16i16(<vscale x 16 x i16> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, #128 // =0x80 @@ -157,7 +157,7 @@ define i32 @split_extract_16i32(<vscale x 16 x i32> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #1 ; CHECK-NEXT: mov w9, #34464 // =0x86a0 @@ -183,7 +183,7 @@ define i64 @split_extract_4i64(<vscale x 4 x i64> %a) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cntw x8 ; CHECK-NEXT: mov w9, #10 // =0xa diff --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll index d7ed42d..4ed59bc 100644 --- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll @@ -21,7 +21,7 @@ define <vscale x 32 x i8> @split_insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt, ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov x9, sp @@ -45,7 +45,7 @@ define <vscale x 8 x float> @split_insert_8f32_idx(<vscale x 8 x float> %a, floa ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov x9, sp @@ -69,7 +69,7 @@ define <vscale x 8 x i64> @split_insert_8i64_idx(<vscale x 8 x i64> %a, i64 %elt ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov x9, sp @@ -130,7 +130,7 @@ define <vscale x 32 x i16> @split_insert_32i16(<vscale x 32 x i16> %a, i16 %elt) ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 ; CHECK-NEXT: mov w9, #128 // =0x80 @@ -159,7 +159,7 @@ define <vscale x 8 x i32> @split_insert_8i32(<vscale x 8 x i32> %a, i32 %elt) { ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, #16960 // =0x4240 diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll index c5cf459..e0da9b57 100644 --- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll +++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll @@ -16,7 +16,7 @@ define i32 @csr_d8_allocnxv4i32i32f64(double %d) "aarch64_pstate_sm_compatible" ; CHECK-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -8 ; CHECK-NEXT: .cfi_offset b8, -16 ; CHECK-NEXT: mov z1.s, #0 // =0x0 @@ -219,7 +219,7 @@ define i32 @csr_d8_allocnxv4i32i32f64_stackargsi32f64(double %d0, double %d1, do ; CHECK-NEXT: str x29, [sp, #8] // 8-byte Folded Spill ; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -8 ; CHECK-NEXT: .cfi_offset b8, -16 ; CHECK-NEXT: mov z1.s, #0 // =0x0 @@ -266,7 +266,7 @@ define i32 @svecc_z8_allocnxv4i32i32f64_fp(double %d, <vscale x 4 x i32> %v) "aa ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -310,7 +310,7 @@ define i32 @svecc_z8_allocnxv4i32i32f64_stackargsi32_fp(double %d, i32 %i0, i32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; 
CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 ; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP @@ -383,7 +383,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK-NEXT: .cfi_offset w30, -40 ; CHECK-NEXT: .cfi_offset w29, -48 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -412,14 +412,14 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48 ; CHECK-NEXT: mov x8, x0 ; CHECK-NEXT: //APP ; CHECK-NEXT: //NO_APP diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll 
b/llvm/test/CodeGen/AArch64/sve-trunc.ll index 0ec6538..50580cb 100644 --- a/llvm/test/CodeGen/AArch64/sve-trunc.ll +++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll @@ -115,7 +115,7 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) { ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: and z7.d, z7.d, #0x1 ; CHECK-NEXT: and z6.d, z6.d, #0x1 diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll index 8a504cd..198e0a3 100644 --- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll +++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll @@ -105,7 +105,7 @@ define <vscale x 8 x i32> @test_compress_large(<vscale x 8 x i32> %vec, <vscale ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpklo p2.h, p0.b ; CHECK-NEXT: cnth x9 diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll index 0eacac2..1dbd7dd 100644 --- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll +++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll @@ -276,7 +276,7 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken(target("aarch64.svcount") %pn, ptr ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0] @@ -298,7 +298,7 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken_scalar(target("aarch64.svcount") % ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0, x1] @@ -585,7 +585,7 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken(target("aarch64.svcount") %pn, pt ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0] @@ -607,7 +607,7 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken_scalar(target("aarch64.svcount") ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0, x1, lsl #1] @@ -896,7 +896,7 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken(target("aarch64.svcount") %pn, ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0] @@ -918,7 +918,7 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken_scalar(target("aarch64.svcount" ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0, x1, lsl #2] @@ -1205,7 +1205,7 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken(target("aarch64.svcount") %pn, ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0] @@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken_scalar(target("aarch64.svcount" ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov p8.b, p0.b ; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0, x1, lsl #3] diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll index 822be14..7e1f63d 100644 --- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll +++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll @@ -13,7 +13,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-18 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -42,27 +42,27 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 
0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; CHECK-NEXT: .cfi_remember_state ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .Ltmp0: +; CHECK-NEXT: .Ltmp0: // EH_LABEL ; CHECK-NEXT: bl may_throw_sve -; CHECK-NEXT: .Ltmp1: +; CHECK-NEXT: .Ltmp1: // EH_LABEL ; CHECK-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: b .LBB0_1 ; CHECK-NEXT: .LBB0_1: // %.Lcontinue ; CHECK-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -108,10 +108,10 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB0_2: // %.Lunwind ; CHECK-NEXT: .cfi_restore_state -; CHECK-NEXT: .Ltmp2: +; CHECK-NEXT: .Ltmp2: // EH_LABEL ; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -165,7 +165,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: .cfi_offset w30, -8 ; GISEL-NEXT: .cfi_offset w29, -16 ; GISEL-NEXT: addvl sp, sp, #-18 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; GISEL-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; GISEL-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -194,27 +194,27 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 
0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG -; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG +; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16 +; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16 ; GISEL-NEXT: addvl sp, sp, #-2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG ; GISEL-NEXT: .cfi_remember_state ; GISEL-NEXT: str z0, [sp] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp0: +; GISEL-NEXT: .Ltmp0: // EH_LABEL ; GISEL-NEXT: bl may_throw_sve -; GISEL-NEXT: .Ltmp1: +; GISEL-NEXT: .Ltmp1: // EH_LABEL ; GISEL-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: b .LBB0_1 ; GISEL-NEXT: .LBB0_1: // %.Lcontinue ; GISEL-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -260,10 +260,10 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw ; GISEL-NEXT: ret ; GISEL-NEXT: .LBB0_2: // %.Lunwind ; GISEL-NEXT: .cfi_restore_state -; GISEL-NEXT: .Ltmp2: +; GISEL-NEXT: .Ltmp2: // EH_LABEL ; GISEL-NEXT: ldr z0, [sp] // 16-byte Folded Reload ; GISEL-NEXT: addvl sp, sp, #2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG +; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload ; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload ; 
GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload @@ -355,9 +355,9 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; CHECK-NEXT: .cfi_offset b23, -272 ; CHECK-NEXT: .cfi_remember_state ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: .Ltmp3: +; CHECK-NEXT: .Ltmp3: // EH_LABEL ; CHECK-NEXT: bl may_throw_neon -; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: .Ltmp4: // EH_LABEL ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: b .LBB1_1 ; CHECK-NEXT: .LBB1_1: // %.Lcontinue @@ -394,7 +394,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: // %.Lunwind ; CHECK-NEXT: .cfi_restore_state -; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: .Ltmp5: // EH_LABEL ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload ; CHECK-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload @@ -462,10 +462,10 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; GISEL-NEXT: .cfi_offset b23, -272 ; GISEL-NEXT: .cfi_remember_state ; GISEL-NEXT: str q0, [sp] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp3: +; GISEL-NEXT: .Ltmp3: // EH_LABEL ; GISEL-NEXT: bl may_throw_neon ; GISEL-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; GISEL-NEXT: .Ltmp4: +; GISEL-NEXT: .Ltmp4: // EH_LABEL ; GISEL-NEXT: b .LBB1_1 ; GISEL-NEXT: .LBB1_1: // %.Lcontinue ; GISEL-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload @@ -501,7 +501,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) ; GISEL-NEXT: ret ; GISEL-NEXT: .LBB1_2: // %.Lunwind ; GISEL-NEXT: .cfi_restore_state -; GISEL-NEXT: .Ltmp5: +; GISEL-NEXT: .Ltmp5: // EH_LABEL ; GISEL-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; GISEL-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload ; GISEL-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/xray-custom-log.ll b/llvm/test/CodeGen/AArch64/xray-custom-log.ll index fd8ddf9..2432808 100644 --- a/llvm/test/CodeGen/AArch64/xray-custom-log.ll +++ b/llvm/test/CodeGen/AArch64/xray-custom-log.ll @@ -1,7 +1,5 @@ ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s ; RUN: llc -mtriple=arm64-apple-darwin < %s | FileCheck %s --check-prefix=MACHO -; RUN: llc -filetype=obj -mtriple=aarch64 %s -o %t -; RUN: llvm-dwarfdump -debug-info %t | FileCheck %s --check-prefix=DBG ; MACHO: bl ___xray_CustomEvent ; MACHO: bl ___xray_CustomEvent @@ -92,18 +90,6 @@ entry: ; CHECK-NEXT: .byte 0x02 ; CHECK-NEXT: .zero 13 -;; Construct call site entries for PATCHABLE_EVENT_CALL. 
-; DBG: DW_TAG_subprogram -; DBG: DW_AT_name -; DBG-SAME: ("customevent") -; DBG: DW_TAG_call_site -; DBG-NEXT: DW_AT_call_target (DW_OP_reg0 {{.*}}) -; DBG-NEXT: DW_AT_call_return_pc -; DBG-EMPTY: -; DBG: DW_TAG_call_site -; DBG-NEXT: DW_AT_call_target (DW_OP_reg2 {{.*}}) -; DBG-NEXT: DW_AT_call_return_pc - declare void @llvm.xray.customevent(ptr, i64) declare void @llvm.xray.typedevent(i64, ptr, i64) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll index a066b15..e6a8bac 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll @@ -1917,8 +1917,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX9-NEXT: s_mov_b32 s0, 0 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; GFX9-NEXT: v_mov_b32_e32 v0, 15 -; GFX9-NEXT: s_movk_i32 s0, 0x3e84 +; GFX9-NEXT: s_add_i32 s0, s0, 4 ; GFX9-NEXT: scratch_store_dword off, v0, s0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -1933,7 +1934,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; GFX10-NEXT: v_mov_b32_e32 v0, 13 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_movk_i32 s0, 0x3e84 +; GFX10-NEXT: s_movk_i32 s0, 0x3e80 +; GFX10-NEXT: s_add_i32 s0, s0, 4 ; GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -1945,10 +1947,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX942-LABEL: store_load_large_imm_offset_kernel: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: v_mov_b32_e32 v0, 13 +; GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v0, 15 -; GFX942-NEXT: s_movk_i32 s0, 0x3e84 +; GFX942-NEXT: s_add_i32 s0, s0, 4 ; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -1958,7 +1961,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX11-LABEL: store_load_large_imm_offset_kernel: ; GFX11: ; %bb.0: ; %bb ; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; GFX11-NEXT: s_movk_i32 s0, 0x3e84 +; GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_add_i32 s0, s0, 4 ; GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -1986,8 +1991,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX9-NEXT: s_mov_b32 s0, 0 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) +; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2002,7 +2008,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15 -; 
UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2014,10 +2021,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_kernel: ; UNALIGNED_GFX942: ; %bb.0: ; %bb ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2027,7 +2035,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; UNALIGNED_GFX11-LABEL: store_load_large_imm_offset_kernel: ; UNALIGNED_GFX11: ; %bb.0: ; %bb ; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e84 +; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s0, 4 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc ; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -2061,11 +2071,13 @@ define void @store_load_large_imm_offset_foo() { ; GFX9-LABEL: store_load_large_imm_offset_foo: ; GFX9: ; %bb.0: ; %bb ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; GFX9-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-NEXT: s_add_i32 s1, s32, s0 ; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, 15 -; GFX9-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX9-NEXT: s_add_i32 s0, s1, 4 ; GFX9-NEXT: scratch_store_dword off, v0, s0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2076,8 +2088,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10: ; %bb.0: ; %bb ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-NEXT: s_movk_i32 s0, 0x3e80 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX10-NEXT: s_add_i32 s1, s32, s0 +; GFX10-NEXT: s_add_i32 s0, s1, 4 ; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2089,11 +2103,13 @@ define void @store_load_large_imm_offset_foo() { ; GFX942-LABEL: store_load_large_imm_offset_foo: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; GFX942-NEXT: v_mov_b32_e32 v0, 13 +; GFX942-NEXT: s_add_i32 s1, s32, s0 ; GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v0, 15 -; GFX942-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX942-NEXT: s_add_i32 s0, s1, 4 ; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2104,7 +2120,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX11: ; %bb.0: ; %bb ; 
GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; GFX11-NEXT: s_add_i32 s0, s32, 0x3e84 +; GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_add_i32 s1, s32, s0 +; GFX11-NEXT: s_add_i32 s0, s1, 4 ; GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc @@ -2133,11 +2152,13 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX9-LABEL: store_load_large_imm_offset_foo: ; UNALIGNED_GFX9: ; %bb.0: ; %bb ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX9-NEXT: s_add_i32 s1, s32, s0 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 ; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc @@ -2148,8 +2169,10 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX10: ; %bb.0: ; %bb ; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15 -; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX10-NEXT: s_add_i32 s1, s32, s0 +; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0 @@ -2161,11 +2184,13 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_foo: ; UNALIGNED_GFX942: ; %bb.0: ; %bb ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80 ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13 +; UNALIGNED_GFX942-NEXT: s_add_i32 s1, s32, s0 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15 -; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1 ; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) ; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1 @@ -2176,7 +2201,10 @@ define void @store_load_large_imm_offset_foo() { ; UNALIGNED_GFX11: ; %bb.0: ; %bb ; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15 -; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s32, 0x3e84 +; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80 +; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; UNALIGNED_GFX11-NEXT: s_add_i32 s1, s32, s0 +; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s1, 4 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc ; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll index 
2785b78..481a254 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll @@ -2243,36 +2243,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr ; ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat: ; GFX1250: ; %bb.0: ; %main_body +; GFX1250-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-NEXT: s_mov_b32 s1, exec_lo -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0 -; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0 -; GFX1250-NEXT: s_cbranch_execz .LBB51_3 +; GFX1250-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-NEXT: ; %bb.1: -; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1 -; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24 +; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v4, s1 -; GFX1250-NEXT: ds_load_b64 v[2:3], v4 -; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1] -; GFX1250-NEXT: .LBB51_2: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3] +; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3] -; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB51_2 -; GFX1250-NEXT: .LBB51_3: +; GFX1250-NEXT: .LBB51_2: ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2322,36 +2308,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3 ; ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush: ; GFX1250: ; %bb.0: ; %main_body +; GFX1250-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-NEXT: s_mov_b32 s1, exec_lo -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0 -; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0 -; GFX1250-NEXT: s_cbranch_execz .LBB52_3 +; GFX1250-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-NEXT: ; %bb.1: -; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1 -; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24 +; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v4, s1 -; GFX1250-NEXT: ds_load_b64 v[2:3], v4 -; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1] -; GFX1250-NEXT: .LBB52_2: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This 
Inner Loop Header: Depth=1 +; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3] -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3] -; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB52_2 -; GFX1250-NEXT: .LBB52_3: +; GFX1250-NEXT: .LBB52_2: ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2401,36 +2373,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp ; ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe: ; GFX1250: ; %bb.0: ; %main_body +; GFX1250-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-NEXT: s_mov_b32 s1, exec_lo -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0 -; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0 -; GFX1250-NEXT: s_cbranch_execz .LBB53_3 +; GFX1250-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-NEXT: ; %bb.1: -; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1 -; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24 +; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v4, s1 -; GFX1250-NEXT: ds_load_b64 v[2:3], v4 -; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1] -; GFX1250-NEXT: .LBB53_2: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3] +; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3] -; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB53_2 -; GFX1250-NEXT: .LBB53_3: +; GFX1250-NEXT: .LBB53_2: ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2459,23 +2417,9 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v2, v0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5] +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB54_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir index 6a4522f..d69a3e1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir @@ -141,11 +141,11 @@ body: | ; SIVI-NEXT: {{ $}} ; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 ; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p5) = COPY $vgpr0 + ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p5) ; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 68 ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64) ; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), addrspace 4) - ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p5) ; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32) ; SIVI-NEXT: [[C1:%[0-9]+]]:_(p5) = G_CONSTANT i32 -1 ; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 @@ -157,9 +157,9 @@ body: | ; GFX9: liveins: $vgpr0 ; GFX9-NEXT: {{ $}} ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0 + ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5) ; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_private_base ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64) - ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5) ; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32) ; GFX9-NEXT: [[C:%[0-9]+]]:_(p5) = G_CONSTANT i32 -1 ; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 @@ -210,11 +210,11 @@ body: | ; SIVI-NEXT: {{ $}} ; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 ; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr0 + ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p3) ; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64) ; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4) - ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p3) ; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32) ; SIVI-NEXT: [[C1:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1 ; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 @@ -226,9 +226,9 @@ body: | ; GFX9: liveins: $vgpr0 ; GFX9-NEXT: {{ $}} ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0 + ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p3) ; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), 
[[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64) - ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p3) ; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32) ; GFX9-NEXT: [[C:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1 ; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 @@ -354,20 +354,20 @@ body: | ; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 ; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr0_vgpr1 ; SIVI-NEXT: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[COPY1]](<2 x p3>) + ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3) ; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64) ; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4) - ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3) ; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32) ; SIVI-NEXT: [[C1:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1 ; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 ; SIVI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](p3), [[C1]] ; SIVI-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C2]] + ; SIVI-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; SIVI-NEXT: [[COPY3:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY3]], [[C]](s64) ; SIVI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4) - ; SIVI-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; SIVI-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT1]](s32), [[LOAD1]](s32) ; SIVI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](p3), [[C1]] ; SIVI-NEXT: [[SELECT1:%[0-9]+]]:_(p0) = G_SELECT [[ICMP1]](s1), [[MV1]], [[C2]] @@ -379,17 +379,17 @@ body: | ; GFX9-NEXT: {{ $}} ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr0_vgpr1 ; GFX9-NEXT: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[COPY]](<2 x p3>) + ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3) ; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base ; GFX9-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64) - ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3) ; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV3]](s32) ; GFX9-NEXT: [[C:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1 ; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0 ; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](p3), [[C]] ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C1]] + ; GFX9-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; GFX9-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base ; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_1]](s64) - ; GFX9-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; GFX9-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT1]](s32), [[UV5]](s32) ; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](p3), [[C]] ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(p0) = G_SELECT [[ICMP1]](s1), [[MV1]], [[C1]] @@ -506,19 +506,19 @@ body: | ; SIVI-NEXT: {{ $}} ; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY 
$sgpr4_sgpr5 ; SIVI-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0 + ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5) ; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p4) = COPY [[COPY]](p4) ; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 68 ; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), addrspace 4) - ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5) ; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32) ; SIVI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p0) ; ; GFX9-LABEL: name: test_addrspacecast_p5_fi_to_p0 ; GFX9: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0 + ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5) ; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_private_base ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64) - ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5) ; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32) ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p0) %0:_(p5) = G_FRAME_INDEX %stack.0 diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll new file mode 100644 index 0000000..4b6375c --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll @@ -0,0 +1,134 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s + +; Test code sequences for addrspacecast with globally addressable scratch. 
+ +target triple = "amdgcn-amd-amdhsa" + +define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) { +; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0 +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_lshlrev_b32 v1, 20, v0 +; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s2, -1 +; GFX1250-SDAG-NEXT: s_cselect_b32 vcc_lo, -1, 0 +; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1 +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo +; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS +; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: use_private_to_flat_addrspacecast: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo +; GFX1250-GISEL-NEXT: v_mbcnt_lo_u32_b32 v2, -1, 0 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, -1 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, s2, v0 +; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v2, 20, v2 +; GFX1250-GISEL-NEXT: s_cselect_b32 s0, 1, 0 +; GFX1250-GISEL-NEXT: s_and_b32 s0, 1, s0 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v2, v1, vcc_lo +; GFX1250-GISEL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1 +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo +; GFX1250-GISEL-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS +; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0 +; GFX1250-GISEL-NEXT: s_endpgm + %stof = addrspacecast ptr addrspace(5) %ptr to ptr + store volatile i32 0, ptr %stof + ret void +} + +define amdgpu_kernel void @use_private_to_flat_addrspacecast_nonnull(ptr addrspace(5) %ptr) { +; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast_nonnull: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 20, v0 +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s0 +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] +; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS +; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: use_private_to_flat_addrspacecast_nonnull: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo +; GFX1250-GISEL-NEXT: v_mbcnt_lo_u32_b32 v2, -1, 0 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v2, 20, v2 +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, s2, v0 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v2, v1, vcc_lo +; GFX1250-GISEL-NEXT: flat_store_b32 v[0:1], v3 scope:SCOPE_SYS +; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0 +; GFX1250-GISEL-NEXT: s_endpgm + %stof = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %ptr) + store volatile i32 0, ptr %stof + ret void +} + +define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) { +; GFX1250-LABEL: use_flat_to_private_addrspacecast: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1250-NEXT: s_cselect_b32 s0, s2, -1 +; GFX1250-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_storecnt 0x0 +; GFX1250-NEXT: s_endpgm + %ftos = addrspacecast ptr %ptr to ptr addrspace(5) + store volatile i32 0, ptr addrspace(5) %ftos + ret void +} + +define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) { +; GFX1250-SDAG-LABEL: use_flat_to_private_addrspacecast_nonnull: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX1250-SDAG-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS +; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: use_flat_to_private_addrspacecast_nonnull: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX1250-GISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS +; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0 +; GFX1250-GISEL-NEXT: s_endpgm + %ftos = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr %ptr) + store volatile i32 0, ptr addrspace(5) %ftos + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll new file mode 100644 index 0000000..5fc9f4a --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll @@ -0,0 +1,1486 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN:llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefix=GFX1250 %s + +define float @global_system_atomic_fadd_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_system_atomic_fadd_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic + ret float %result +} + +define float 
@global_one_as_atomic_fadd_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_one_as_atomic_fadd_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @global_system_atomic_fadd_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_system_atomic_fadd_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic + ret double %result +} + +define double @global_one_as_atomic_fadd_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_one_as_atomic_fadd_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define float @global_system_atomic_fmin_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_system_atomic_fmin_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val monotonic + ret float %result +} + +define float @global_one_as_atomic_fmin_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_one_as_atomic_fmin_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @global_system_atomic_fmin_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_system_atomic_fmin_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val monotonic + ret double %result +} + +define double @global_one_as_atomic_fmin_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_one_as_atomic_fmin_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("one-as") 
monotonic + ret double %result +} + +define float @global_system_atomic_fmax_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_system_atomic_fmax_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val monotonic + ret float %result +} + +define float @global_one_as_atomic_fmax_f32(ptr addrspace(1) %ptr, float %val) { +; GFX1250-LABEL: global_one_as_atomic_fmax_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @global_system_atomic_fmax_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_system_atomic_fmax_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val monotonic + ret double %result +} + +define double @global_one_as_atomic_fmax_f64(ptr addrspace(1) %ptr, double %val) { +; GFX1250-LABEL: global_one_as_atomic_fmax_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define i32 @global_one_as_atomic_min_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_one_as_atomic_min_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @global_system_atomic_min_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_system_atomic_min_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @global_one_as_atomic_max_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_one_as_atomic_max_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i32 %val syncscope("one-as") 
monotonic + ret i32 %result +} + +define i32 @global_system_atomic_max_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_system_atomic_max_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @global_one_as_atomic_umin_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_one_as_atomic_umin_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @global_system_atomic_umin_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_system_atomic_umin_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @global_one_as_atomic_umax_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_one_as_atomic_umax_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @global_system_atomic_umax_i32(ptr addrspace(1) %ptr, i32 %val) { +; GFX1250-LABEL: global_system_atomic_umax_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %val monotonic + ret i32 %result +} + +define i64 @global_one_as_atomic_min_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_one_as_atomic_min_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @global_system_atomic_min_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_system_atomic_min_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @global_one_as_atomic_max_i64(ptr 
addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_one_as_atomic_max_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @global_system_atomic_max_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_system_atomic_max_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @global_one_as_atomic_umin_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_one_as_atomic_umin_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @global_system_atomic_umin_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_system_atomic_umin_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_min_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @global_one_as_atomic_umax_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_one_as_atomic_umax_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @global_system_atomic_umax_i64(ptr addrspace(1) %ptr, i64 %val) { +; GFX1250-LABEL: global_system_atomic_umax_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_atomic_max_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %val monotonic + ret i64 %result +} + +define i16 @global_one_as_atomic_min_i16(ptr addrspace(1) %ptr, i16 %val) { +; GFX1250-LABEL: global_one_as_atomic_min_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: global_load_b32 v5, v[0:1], off +; GFX1250-NEXT: 
v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB28_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_min_i16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB28_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @global_one_as_atomic_umin_i16(ptr addrspace(1) %ptr, i16 %val) { +; GFX1250-LABEL: global_one_as_atomic_umin_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: global_load_b32 v5, v[0:1], off +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB29_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_min_u16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB29_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @global_one_as_atomic_max_i16(ptr addrspace(1) %ptr, i16 %val) { +; GFX1250-LABEL: global_one_as_atomic_max_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: 
s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: global_load_b32 v5, v[0:1], off +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB30_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_max_i16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB30_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @global_one_as_atomic_umax_i16(ptr addrspace(1) %ptr, i16 %val) { +; GFX1250-LABEL: global_one_as_atomic_umax_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: global_load_b32 v5, v[0:1], off +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB31_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_max_u16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB31_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: 
v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define float @flat_system_atomic_fadd_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_system_atomic_fadd_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr %ptr, float %val monotonic + ret float %result +} + +define float @flat_one_as_atomic_fadd_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_one_as_atomic_fadd_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @flat_system_atomic_fadd_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_system_atomic_fadd_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base +; GFX1250-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB34_6 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1 +; GFX1250-NEXT: s_cbranch_execz .LBB34_3 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global +; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB34_3: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1 +; GFX1250-NEXT: s_cbranch_execz .LBB34_5 +; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB34_5: ; %Flow1 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB34_6: ; %Flow2 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB34_8 +; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, 
vcc_lo +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3] +; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr %ptr, double %val monotonic + ret double %result +} + +define double @flat_one_as_atomic_fadd_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_one_as_atomic_fadd_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base +; GFX1250-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB35_6 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1 +; GFX1250-NEXT: s_cbranch_execz .LBB35_3 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global +; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB35_3: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1 +; GFX1250-NEXT: s_cbranch_execz .LBB35_5 +; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB35_5: ; %Flow1 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB35_6: ; %Flow2 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB35_8 +; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3] +; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fadd ptr %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define float @flat_system_atomic_fmin_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_system_atomic_fmin_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_num_f32 v0, v[0:1], v2 
th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr %ptr, float %val monotonic + ret float %result +} + +define float @flat_one_as_atomic_fmin_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_one_as_atomic_fmin_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @flat_system_atomic_fmin_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_system_atomic_fmin_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB38_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB38_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB38_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] +; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr %ptr, double %val monotonic + ret double %result +} + +define double @flat_one_as_atomic_fmin_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_one_as_atomic_fmin_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB39_2 +; GFX1250-NEXT: ; %bb.1: ; 
%atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB39_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB39_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] +; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmin ptr %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define float @flat_system_atomic_fmax_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_system_atomic_fmax_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr %ptr, float %val monotonic + ret float %result +} + +define float @flat_one_as_atomic_fmax_f32(ptr %ptr, float %val) { +; GFX1250-LABEL: flat_one_as_atomic_fmax_f32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr %ptr, float %val syncscope("one-as") monotonic + ret float %result +} + +define double @flat_system_atomic_fmax_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_system_atomic_fmax_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB42_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB42_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB42_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 
s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr %ptr, double %val monotonic + ret double %result +} + +define double @flat_one_as_atomic_fmax_f64(ptr %ptr, double %val) { +; GFX1250-LABEL: flat_one_as_atomic_fmax_f64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB43_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB43_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB43_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] +; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw fmax ptr %ptr, double %val syncscope("one-as") monotonic + ret double %result +} + +define i32 @flat_one_as_atomic_min_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_one_as_atomic_min_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, 
i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @flat_system_atomic_min_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_system_atomic_min_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @flat_one_as_atomic_max_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_one_as_atomic_max_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @flat_system_atomic_max_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_system_atomic_max_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @flat_one_as_atomic_umin_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umin_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @flat_system_atomic_umin_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_system_atomic_umin_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_min_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i32 %val monotonic + ret i32 %result +} + +define i32 @flat_one_as_atomic_umax_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umax_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i32 %val syncscope("one-as") monotonic + ret i32 %result +} + +define i32 @flat_system_atomic_umax_i32(ptr %ptr, i32 %val) { +; GFX1250-LABEL: flat_system_atomic_umax_i32: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: flat_atomic_max_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i32 %val monotonic + ret i32 %result +} + +define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_one_as_atomic_min_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, 
src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB52_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB52_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB52_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_min_i64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB52_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_system_atomic_min_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB53_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB53_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB53_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_min_i64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB53_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: 
v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_one_as_atomic_max_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB54_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB54_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB54_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_i64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB54_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_system_atomic_max_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB55_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB55_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB55_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, 
-1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_i64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB55_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw max ptr %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umin_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB56_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB56_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB56_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_min_u64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB56_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_system_atomic_umin_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB57_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB57_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: 
s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB57_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_min_u64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB57_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i64 %val monotonic + ret i64 %result +} + +define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umax_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB58_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB58_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB58_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_u64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB58_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i64 %val syncscope("one-as") monotonic + ret i64 %result +} + +define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) { +; GFX1250-LABEL: flat_system_atomic_umax_i64: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-NEXT: s_delay_alu 
instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB59_2 +; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-NEXT: .LBB59_2: ; %Flow +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execz .LBB59_4 +; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private +; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_max_u64 v[0:1], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE +; GFX1250-NEXT: .LBB59_4: ; %atomicrmw.phi +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i64 %val monotonic + ret i64 %result +} + +define i16 @flat_one_as_atomic_min_i16(ptr %ptr, i16 %val) { +; GFX1250-LABEL: flat_one_as_atomic_min_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: flat_load_b32 v5, v[0:1] +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB60_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_min_i16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB60_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw min ptr %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @flat_one_as_atomic_umin_i16(ptr %ptr, i16 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umin_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: 
v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: flat_load_b32 v5, v[0:1] +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB61_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_min_u16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB61_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umin ptr %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @flat_one_as_atomic_max_i16(ptr %ptr, i16 %val) { +; GFX1250-LABEL: flat_one_as_atomic_max_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: flat_load_b32 v5, v[0:1] +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB62_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_max_i16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB62_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = 
atomicrmw max ptr %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} + +define i16 @flat_one_as_atomic_umax_i16(ptr %ptr, i16 %val) { +; GFX1250-LABEL: flat_one_as_atomic_umax_i16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v3, v0 +; GFX1250-NEXT: s_mov_b32 s0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3 +; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX1250-NEXT: flat_load_b32 v5, v[0:1] +; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_not_b32_e32 v4, v4 +; GFX1250-NEXT: .LBB63_1: ; %atomicrmw.start +; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7 +; GFX1250-NEXT: v_max_u16 v5, v5, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5 +; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB63_1 +; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %result = atomicrmw umax ptr %ptr, i16 %val syncscope("one-as") monotonic + ret i16 %result +} diff --git a/llvm/test/CodeGen/AMDGPU/empty-text.ll b/llvm/test/CodeGen/AMDGPU/empty-text.ll new file mode 100644 index 0000000..8aa8600 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/empty-text.ll @@ -0,0 +1,9 @@ +; Test that there is no s_code_end padding if .text is otherwise empty. 
+ +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 < %s | FileCheck %s --check-prefixes=GCN + +@globalVar = global i32 37 + +declare amdgpu_ps void @funcDecl() + +; GCN-NOT: .fill diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll index 2ff66c9..7d36c9f 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll @@ -252,13 +252,15 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB10_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -277,9 +279,11 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB10_2 ; GFX1250-SDAG-NEXT: .LBB10_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -292,15 +296,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB10_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -314,13 +319,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB10_2 ; GFX1250-GISEL-NEXT: .LBB10_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -344,11 +352,13 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB11_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -367,8 +377,11 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB11_2 ; GFX1250-SDAG-NEXT: .LBB11_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -381,18 +394,19 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, 
exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB11_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -406,13 +420,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB11_2 ; GFX1250-GISEL-NEXT: .LBB11_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -433,11 +450,13 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB12_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -455,9 +474,11 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB12_2 ; GFX1250-SDAG-NEXT: .LBB12_4: ; 
%atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm ; @@ -465,13 +486,14 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB12_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -483,14 +505,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB12_2 ; GFX1250-GISEL-NEXT: .LBB12_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 @@ -508,10 +533,12 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB13_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -529,8 +556,11 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB13_2 ; GFX1250-SDAG-NEXT: .LBB13_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm ; @@ -538,16 +568,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB13_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -559,14 +590,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB13_2 ; GFX1250-GISEL-NEXT: .LBB13_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 @@ -642,13 +676,15 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_add_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: 
v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB18_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -667,9 +703,11 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB18_2 ; GFX1250-SDAG-NEXT: .LBB18_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[2:3] @@ -683,15 +721,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB18_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -705,13 +744,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB18_2 ; GFX1250-GISEL-NEXT: 
.LBB18_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[4:5] @@ -736,11 +778,13 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB19_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -759,8 +803,11 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB19_2 ; GFX1250-SDAG-NEXT: .LBB19_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[2:3] @@ -774,18 +821,19 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB19_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -799,13 +847,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB19_2 ; GFX1250-GISEL-NEXT: .LBB19_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[4:5] @@ -827,11 +878,13 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB20_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -849,9 +902,11 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB20_2 ; GFX1250-SDAG-NEXT: .LBB20_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3] @@ -862,13 +917,14 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 
v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB20_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -880,14 +936,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB20_2 ; GFX1250-GISEL-NEXT: .LBB20_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[4:5] @@ -908,10 +967,12 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB21_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -929,8 +990,11 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB21_2 ; GFX1250-SDAG-NEXT: .LBB21_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: 
s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3] @@ -941,16 +1005,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB21_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -962,14 +1027,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB21_2 ; GFX1250-GISEL-NEXT: .LBB21_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[4:5] @@ -1048,13 +1116,15 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB26_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1073,9 +1143,11 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB26_2 ; GFX1250-SDAG-NEXT: .LBB26_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[2:3] @@ -1089,15 +1161,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB26_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1111,13 +1184,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB26_2 ; GFX1250-GISEL-NEXT: .LBB26_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[4:5] @@ -1142,11 +1218,13 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB27_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1165,8 +1243,11 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB27_2 ; GFX1250-SDAG-NEXT: .LBB27_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[2:3] @@ -1180,18 +1261,19 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB27_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1205,13 +1287,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN 
scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB27_2 ; GFX1250-GISEL-NEXT: .LBB27_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[4:5] @@ -1233,11 +1318,13 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB28_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1255,9 +1342,11 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB28_2 ; GFX1250-SDAG-NEXT: .LBB28_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[2:3] @@ -1268,13 +1357,14 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: 
s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB28_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1286,14 +1376,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB28_2 ; GFX1250-GISEL-NEXT: .LBB28_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[4:5] @@ -1314,10 +1407,12 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB29_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1335,8 +1430,11 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB29_2 ; GFX1250-SDAG-NEXT: .LBB29_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[2:3] @@ -1347,16 +1445,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; 
GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB29_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1368,14 +1467,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB29_2 ; GFX1250-GISEL-NEXT: .LBB29_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[4:5] @@ -1454,13 +1556,15 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_and_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB34_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1479,9 +1583,11 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB34_2 ; GFX1250-SDAG-NEXT: .LBB34_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: 
s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3 @@ -1496,15 +1602,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB34_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1518,13 +1625,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB34_2 ; GFX1250-GISEL-NEXT: .LBB34_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4 @@ -1550,11 +1660,13 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; 
GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB35_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1573,8 +1685,11 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB35_2 ; GFX1250-SDAG-NEXT: .LBB35_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3 @@ -1589,18 +1704,19 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB35_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1614,13 +1730,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB35_2 ; GFX1250-GISEL-NEXT: .LBB35_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) 
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4 @@ -1643,11 +1762,13 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB36_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1665,9 +1786,11 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB36_2 ; GFX1250-SDAG-NEXT: .LBB36_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3 @@ -1679,13 +1802,14 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB36_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1697,14 +1821,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; 
GFX1250-GISEL-NEXT: s_cbranch_execz .LBB36_2 ; GFX1250-GISEL-NEXT: .LBB36_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4 @@ -1726,10 +1853,12 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB37_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1747,8 +1876,11 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB37_2 ; GFX1250-SDAG-NEXT: .LBB37_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3 @@ -1760,16 +1892,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB37_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ 
-1781,14 +1914,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB37_2 ; GFX1250-GISEL-NEXT: .LBB37_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4 @@ -1868,13 +2004,15 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-LABEL: flat_or_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB42_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1893,9 +2031,11 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB42_2 ; GFX1250-SDAG-NEXT: .LBB42_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3 @@ -1910,15 +2050,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB42_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -1932,13 +2073,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB42_2 ; GFX1250-GISEL-NEXT: .LBB42_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4 @@ -1964,11 +2108,13 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB43_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1987,8 +2133,11 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB43_2 ; GFX1250-SDAG-NEXT: .LBB43_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: 
s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3 @@ -2003,18 +2152,19 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB43_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2028,13 +2178,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB43_2 ; GFX1250-GISEL-NEXT: .LBB43_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4 @@ -2057,11 +2210,13 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: 
v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB44_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2079,9 +2234,11 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB44_2 ; GFX1250-SDAG-NEXT: .LBB44_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3 @@ -2093,13 +2250,14 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB44_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2111,14 +2269,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB44_2 ; GFX1250-GISEL-NEXT: .LBB44_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4 @@ -2140,10 +2301,12 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) 
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB45_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2161,8 +2324,11 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB45_2 ; GFX1250-SDAG-NEXT: .LBB45_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3 @@ -2174,16 +2340,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB45_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2195,14 +2362,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB45_2 ; GFX1250-GISEL-NEXT: .LBB45_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, 
v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4 @@ -2282,13 +2452,15 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB50_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2307,9 +2479,11 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB50_2 ; GFX1250-SDAG-NEXT: .LBB50_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3 @@ -2324,15 +2498,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: 
s_cbranch_execnz .LBB50_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2346,13 +2521,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB50_2 ; GFX1250-GISEL-NEXT: .LBB50_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4 @@ -2378,11 +2556,13 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB51_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2401,8 +2581,11 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-SDAG-NEXT: .LBB51_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3 @@ -2417,18 +2600,19 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB51_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2442,13 +2626,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-GISEL-NEXT: .LBB51_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4 @@ -2471,11 +2658,13 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB52_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2493,9 +2682,11 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-SDAG-NEXT: .LBB52_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; 
GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3 @@ -2507,13 +2698,14 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB52_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2525,14 +2717,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-GISEL-NEXT: .LBB52_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4 @@ -2554,10 +2749,12 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB53_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2575,8 +2772,11 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg 
%sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-SDAG-NEXT: .LBB53_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3 @@ -2588,16 +2788,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB53_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2609,14 +2810,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-GISEL-NEXT: .LBB53_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4 @@ -2690,13 +2894,15 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_max_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; 
GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB58_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2715,10 +2921,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB58_2 ; GFX1250-SDAG-NEXT: .LBB58_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_i64 v[2:3], v[0:1], v[2:3] @@ -2732,15 +2940,16 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB58_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2753,15 +2962,18 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: .LBB58_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB58_2 ; GFX1250-GISEL-NEXT: .LBB58_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, 
src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_i64 v[2:3], v[0:1], v[4:5] @@ -2786,11 +2998,13 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB59_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2809,9 +3023,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-SDAG-NEXT: .LBB59_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_i64 v[2:3], v[0:1], v[2:3] @@ -2825,18 +3042,19 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 
bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB59_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2849,15 +3067,18 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: .LBB59_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-GISEL-NEXT: .LBB59_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_i64 v[2:3], v[0:1], v[4:5] @@ -2879,11 +3100,13 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB60_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2900,9 +3123,11 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB60_2 ; GFX1250-SDAG-NEXT: .LBB60_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3] @@ -2913,13 +3138,14 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: 
v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB60_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -2930,14 +3156,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: .LBB60_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v0, v[4:5], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB60_2 ; GFX1250-GISEL-NEXT: .LBB60_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_i64 v[0:1], v[0:1], v[4:5] @@ -2958,10 +3187,12 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB61_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2978,8 +3209,11 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB61_2 ; GFX1250-SDAG-NEXT: .LBB61_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 
v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3] @@ -2990,16 +3224,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB61_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3010,14 +3245,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: .LBB61_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v0, v[4:5], s[2:3] offset:-128 ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB61_2 ; GFX1250-GISEL-NEXT: .LBB61_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_i64 v[0:1], v[0:1], v[4:5] @@ -3090,13 +3328,15 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_min_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB66_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3115,10 +3355,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB66_2 ; GFX1250-SDAG-NEXT: .LBB66_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_i64 v[2:3], v[0:1], v[2:3] @@ -3132,15 +3374,16 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB66_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3153,15 +3396,18 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: .LBB66_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB66_2 ; GFX1250-GISEL-NEXT: .LBB66_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_i64 v[2:3], v[0:1], v[4:5] @@ -3186,11 +3432,13 @@ define amdgpu_ps <2 x float> 
@flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB67_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3209,9 +3457,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB67_2 ; GFX1250-SDAG-NEXT: .LBB67_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_i64 v[2:3], v[0:1], v[2:3] @@ -3225,18 +3476,19 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB67_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3249,15 +3501,18 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr 
inreg %sbase, i3 ; GFX1250-GISEL-NEXT: .LBB67_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB67_2 ; GFX1250-GISEL-NEXT: .LBB67_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_i64 v[2:3], v[0:1], v[4:5] @@ -3279,11 +3534,13 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB68_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3300,9 +3557,11 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB68_2 ; GFX1250-SDAG-NEXT: .LBB68_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3] @@ -3313,13 +3572,14 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB68_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3330,14 +3590,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: .LBB68_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v0, v[4:5], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB68_2 ; GFX1250-GISEL-NEXT: .LBB68_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_i64 v[0:1], v[0:1], v[4:5] @@ -3358,10 +3621,12 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB69_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3378,8 +3643,11 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB69_2 ; GFX1250-SDAG-NEXT: .LBB69_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3] @@ -3390,16 +3658,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB69_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3410,14 +3679,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: .LBB69_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v0, v[4:5], s[2:3] offset:-128 ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB69_2 ; GFX1250-GISEL-NEXT: .LBB69_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_i64 v[0:1], v[0:1], v[4:5] @@ -3490,13 +3762,15 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB74_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3515,10 +3789,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB74_2 ; GFX1250-SDAG-NEXT: .LBB74_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: 
v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_u64 v[2:3], v[0:1], v[2:3] @@ -3532,15 +3808,16 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB74_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3553,15 +3830,18 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: .LBB74_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB74_2 ; GFX1250-GISEL-NEXT: .LBB74_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_u64 v[2:3], v[0:1], v[4:5] @@ -3586,11 +3866,13 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: 
v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB75_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3609,9 +3891,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB75_2 ; GFX1250-SDAG-NEXT: .LBB75_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_u64 v[2:3], v[0:1], v[2:3] @@ -3625,18 +3910,19 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB75_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3649,15 +3935,18 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: .LBB75_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB75_2 ; GFX1250-GISEL-NEXT: .LBB75_4: ; 
%atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_u64 v[2:3], v[0:1], v[4:5] @@ -3679,11 +3968,13 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB76_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3700,9 +3991,11 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB76_2 ; GFX1250-SDAG-NEXT: .LBB76_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3] @@ -3713,13 +4006,14 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB76_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3730,14 +4024,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: .LBB76_3: ; 
%atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v0, v[4:5], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB76_2 ; GFX1250-GISEL-NEXT: .LBB76_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_u64 v[0:1], v[0:1], v[4:5] @@ -3758,10 +4055,12 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB77_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3778,8 +4077,11 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB77_2 ; GFX1250-SDAG-NEXT: .LBB77_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3] @@ -3790,16 +4092,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB77_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3810,14 +4113,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: .LBB77_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v0, v[4:5], s[2:3] offset:-128 ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB77_2 ; GFX1250-GISEL-NEXT: .LBB77_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_max_u64 v[0:1], v[0:1], v[4:5] @@ -3890,13 +4196,15 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB82_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3915,10 +4223,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB82_2 ; GFX1250-SDAG-NEXT: .LBB82_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_u64 v[2:3], v[0:1], v[2:3] @@ -3932,15 +4242,16 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL: ; %bb.0: ; 
GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB82_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -3953,15 +4264,18 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: .LBB82_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB82_2 ; GFX1250-GISEL-NEXT: .LBB82_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_u64 v[2:3], v[0:1], v[4:5] @@ -3986,11 +4300,13 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB83_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4009,9 
+4325,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB83_2 ; GFX1250-SDAG-NEXT: .LBB83_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_u64 v[2:3], v[0:1], v[2:3] @@ -4025,18 +4344,19 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB83_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4049,15 +4369,18 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: .LBB83_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB83_2 ; GFX1250-GISEL-NEXT: .LBB83_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: 
v_min_u64 v[2:3], v[0:1], v[4:5] @@ -4079,11 +4402,13 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB84_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4100,9 +4425,11 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB84_2 ; GFX1250-SDAG-NEXT: .LBB84_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3] @@ -4113,13 +4440,14 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB84_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4130,14 +4458,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: .LBB84_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v0, v[4:5], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB84_2 ; GFX1250-GISEL-NEXT: .LBB84_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: 
v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_u64 v[0:1], v[0:1], v[4:5] @@ -4158,10 +4489,12 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB85_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4178,8 +4511,11 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB85_2 ; GFX1250-SDAG-NEXT: .LBB85_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3] @@ -4190,16 +4526,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB85_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4210,14 +4547,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: .LBB85_3: ; %atomicrmw.global ; 
GFX1250-GISEL-NEXT: flat_atomic_min_u64 v0, v[4:5], s[2:3] offset:-128 ; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB85_2 ; GFX1250-GISEL-NEXT: .LBB85_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_min_u64 v[0:1], v[0:1], v[4:5] @@ -4310,14 +4650,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB90_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4338,9 +4680,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB90_2 ; GFX1250-SDAG-NEXT: .LBB90_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v2, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] @@ -4356,15 +4700,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v7, v4 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu 
instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v0, v5 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, s0, v3 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB90_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4380,13 +4725,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v[0:1], v5, v[6:9], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB90_2 ; GFX1250-GISEL-NEXT: .LBB90_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] @@ -4414,11 +4762,13 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB91_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4439,8 +4789,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB91_2 ; GFX1250-SDAG-NEXT: .LBB91_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v2, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-SDAG-NEXT: 
s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] @@ -4456,18 +4809,19 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v7, v4 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v5 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, s0, v3 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB91_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4483,13 +4837,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v[0:1], v5, v[6:9], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB91_2 ; GFX1250-GISEL-NEXT: .LBB91_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] @@ -4512,13 +4869,15 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_nortn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4 -; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 
0 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB92_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4538,9 +4897,11 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB92_2 ; GFX1250-SDAG-NEXT: .LBB92_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] @@ -4553,13 +4914,14 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB92_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4573,14 +4935,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v0, v[6:9], s[2:3] scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB92_2 ; GFX1250-GISEL-NEXT: .LBB92_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; 
GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] @@ -4603,10 +4968,12 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB93_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4626,8 +4993,11 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB93_2 ; GFX1250-SDAG-NEXT: .LBB93_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] @@ -4640,16 +5010,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB93_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4663,14 +5034,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v0, v[6:9], s[2:3] offset:-128 scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: ; 
implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB93_2 ; GFX1250-GISEL-NEXT: .LBB93_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] @@ -4742,13 +5116,15 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB98_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4766,15 +5142,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB98_2 ; GFX1250-SDAG-NEXT: .LBB98_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1] ; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4 ; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -4786,15 +5163,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | 
instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB98_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4806,21 +5184,24 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_branch .LBB98_5 ; GFX1250-GISEL-NEXT: .LBB98_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB98_2 ; GFX1250-GISEL-NEXT: .LBB98_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -4843,11 +5224,13 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; 
GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB99_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4865,14 +5248,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB99_2 ; GFX1250-SDAG-NEXT: .LBB99_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1] ; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4 ; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -4884,18 +5269,19 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB99_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4907,21 +5293,24 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_branch .LBB99_5 ; GFX1250-GISEL-NEXT: .LBB99_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB99_2 
; GFX1250-GISEL-NEXT: .LBB99_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -4941,11 +5330,13 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB100_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4961,14 +5352,15 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB100_2 ; GFX1250-SDAG-NEXT: .LBB100_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1] ; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4 ; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm @@ -4977,13 +5369,14 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, 
src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB100_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -4993,20 +5386,23 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_endpgm ; GFX1250-GISEL-NEXT: .LBB100_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB100_2 ; GFX1250-GISEL-NEXT: .LBB100_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm @@ -5025,10 +5421,12 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB101_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5044,13 +5442,15 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB101_2 ; GFX1250-SDAG-NEXT: .LBB101_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 
0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1] ; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3] -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4 ; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm @@ -5059,16 +5459,17 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB101_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5078,20 +5479,23 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_endpgm ; GFX1250-GISEL-NEXT: .LBB101_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB101_2 ; GFX1250-GISEL-NEXT: .LBB101_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo ; 
GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm @@ -5161,13 +5565,15 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB106_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5185,10 +5591,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB106_2 ; GFX1250-SDAG-NEXT: .LBB106_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5207,15 +5615,16 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB106_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5227,15 +5636,18 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff 
; GFX1250-GISEL-NEXT: s_branch .LBB106_5 ; GFX1250-GISEL-NEXT: .LBB106_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB106_2 ; GFX1250-GISEL-NEXT: .LBB106_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5265,11 +5677,13 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5 +; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB107_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5287,9 +5701,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB107_2 ; GFX1250-SDAG-NEXT: .LBB107_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5308,18 +5725,19 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; 
GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7 +; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB107_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5331,15 +5749,18 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_branch .LBB107_5 ; GFX1250-GISEL-NEXT: .LBB107_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB107_2 ; GFX1250-GISEL-NEXT: .LBB107_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo ; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5366,11 +5787,13 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB108_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5386,9 +5809,11 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB108_2 ; GFX1250-SDAG-NEXT: .LBB108_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-SDAG-NEXT: 
s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5404,13 +5829,14 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB108_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5420,14 +5846,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_endpgm ; GFX1250-GISEL-NEXT: .LBB108_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB108_2 ; GFX1250-GISEL-NEXT: .LBB108_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5453,10 +5882,12 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB109_3 ; 
GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -5472,8 +5903,11 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB109_2 ; GFX1250-SDAG-NEXT: .LBB109_4: ; %atomicrmw.private +; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] @@ -5489,16 +5923,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base -; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3 +; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB109_3 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow @@ -5508,14 +5943,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_endpgm ; GFX1250-GISEL-NEXT: .LBB109_3: ; %atomicrmw.global ; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV -; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 +; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB109_2 ; GFX1250-GISEL-NEXT: .LBB109_4: ; %atomicrmw.private +; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1] diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll index b25d9b2..fc88839 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll @@ -3621,7 +3621,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; 
GFX9-NEXT: s_mov_b32 s0, 0 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: s_movk_i32 s0, 0x3004 +; GFX9-NEXT: s_movk_i32 s0, 0x3000 +; GFX9-NEXT: s_add_i32 s0, s0, 4 ; GFX9-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -3637,7 +3638,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9 ; GFX10-NEXT: v_mov_b32_e32 v0, 13 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_movk_i32 s0, 0x3804 +; GFX10-NEXT: s_movk_i32 s0, 0x3800 +; GFX10-NEXT: s_add_i32 s0, s0, 4 ; GFX10-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3682,7 +3684,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX9-PAL-NEXT: s_addc_u32 flat_scratch_hi, s13, 0 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:4 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) -; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3004 +; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000 +; GFX9-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) @@ -3716,8 +3719,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX1010-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 ; GFX1010-PAL-NEXT: v_mov_b32_e32 v0, 13 ; GFX1010-PAL-NEXT: v_mov_b32_e32 v1, 15 +; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3800 ; GFX1010-PAL-NEXT: s_mov_b32 s1, 0 -; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3804 +; GFX1010-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX1010-PAL-NEXT: scratch_store_dword off, v0, s1 offset:4 ; GFX1010-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX1010-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3739,7 +3743,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() { ; GFX1030-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13 ; GFX1030-PAL-NEXT: v_mov_b32_e32 v0, 13 ; GFX1030-PAL-NEXT: v_mov_b32_e32 v1, 15 -; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3804 +; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3800 +; GFX1030-PAL-NEXT: s_add_i32 s0, s0, 4 ; GFX1030-PAL-NEXT: scratch_store_dword off, v0, off offset:4 ; GFX1030-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX1030-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3785,10 +3790,12 @@ define void @store_load_large_imm_offset_foo() { ; GFX9-LABEL: store_load_large_imm_offset_foo: ; GFX9: ; %bb.0: ; %bb ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_movk_i32 s0, 0x3000 ; GFX9-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-NEXT: s_add_i32 s1, s32, s0 ; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: s_add_i32 s0, s32, 0x3004 +; GFX9-NEXT: s_add_i32 s0, s1, 4 ; GFX9-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -3800,8 +3807,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10: ; %bb.0: ; %bb ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-NEXT: s_movk_i32 s0, 0x3800 ; GFX10-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-NEXT: s_add_i32 s0, s32, 0x3804 +; GFX10-NEXT: s_add_i32 s1, s32, s0 +; GFX10-NEXT: s_add_i32 s0, s1, 4 ; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664 @@ -3843,10 +3852,12 @@ define void 
@store_load_large_imm_offset_foo() { ; GFX9-PAL-LABEL: store_load_large_imm_offset_foo: ; GFX9-PAL: ; %bb.0: ; %bb ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 13 +; GFX9-PAL-NEXT: s_add_i32 s1, s32, s0 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) -; GFX9-PAL-NEXT: s_add_i32 s0, s32, 0x3004 +; GFX9-PAL-NEXT: s_add_i32 s0, s1, 4 ; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15 ; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712 ; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) @@ -3872,8 +3883,10 @@ define void @store_load_large_imm_offset_foo() { ; GFX10-PAL: ; %bb.0: ; %bb ; GFX10-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-PAL-NEXT: v_mov_b32_e32 v0, 13 +; GFX10-PAL-NEXT: s_movk_i32 s0, 0x3800 ; GFX10-PAL-NEXT: v_mov_b32_e32 v1, 15 -; GFX10-PAL-NEXT: s_add_i32 s0, s32, 0x3804 +; GFX10-PAL-NEXT: s_add_i32 s1, s32, s0 +; GFX10-PAL-NEXT: s_add_i32 s0, s1, 4 ; GFX10-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4 ; GFX10-PAL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664 diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir index 7fad2f4..a88b1ec 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir @@ -75,7 +75,8 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_materializedconst_0 - ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, 256, implicit-def $scc + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 256 + ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]], implicit-def $scc ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:sreg_32 = S_MOV_B32 %stack.0 diff --git a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir index cc43142..2f2d727 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir @@ -46,7 +46,8 @@ body: | %2:sreg_32 = S_LSHL2_ADD_U32 %0, %1, implicit-def $scc ... 
# GCN-LABEL: name: test_frameindex{{$}} -# GCN: %1:sreg_32 = S_ADD_I32 %stack.0, 70 +# GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 70 +# GCN-NEXT: %1:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]] --- name: test_frameindex tracksRegLiveness: true diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll index f9a24fe..0cb2b0b 100644 --- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll +++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll @@ -2102,23 +2102,10 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret(ptr addrspace(3) %ptr, do ; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x24 ; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v2, s2 -; GFX1250-NEXT: s_mov_b32 s2, 0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: .LBB51_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: v_mov_b32_e32 v2, s2 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1] -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s2, vcc_lo, s2 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2 -; GFX1250-NEXT: s_cbranch_execnz .LBB51_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX1250-NEXT: s_endpgm main_body: %ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0) @@ -2148,24 +2135,9 @@ define double @local_atomic_fadd_f64_rtn(ptr addrspace(3) %ptr, double %data) { ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0 -; GFX1250-NEXT: v_mov_b32_e32 v4, v1 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB52_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7] +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB52_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0) @@ -2197,24 +2169,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat: ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0 -; GFX1250-NEXT: s_mov_b32 s0, 0 
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: .LBB53_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1] -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB53_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2246,24 +2205,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3 ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush: ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1] +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB54_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2295,24 +2241,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp ; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe: ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: .LBB55_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1] +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-NEXT: ds_add_f64 v2, v[0:1] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1] -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB55_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end ; GFX1250-NEXT: s_endpgm main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst @@ -2341,23 
+2274,9 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v2, v0 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB56_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5] +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB56_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0 @@ -2387,24 +2306,9 @@ define double @local_atomic_fadd_f64_rtn_ieee_unsafe(ptr addrspace(3) %ptr, doub ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0 -; GFX1250-NEXT: v_mov_b32_e32 v4, v1 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB57_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7] +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB57_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0) @@ -2434,24 +2338,9 @@ define double @local_atomic_fadd_f64_rtn_ieee_safe(ptr addrspace(3) %ptr, double ; GFX1250: ; %bb.0: ; %main_body ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0 -; GFX1250-NEXT: v_mov_b32_e32 v4, v1 -; GFX1250-NEXT: ds_load_b64 v[0:1], v0 -; GFX1250-NEXT: s_mov_b32 s0, 0 -; GFX1250-NEXT: .LBB58_1: ; %atomicrmw.start -; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5] -; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7] +; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_wait_dscnt 0x0 -; GFX1250-NEXT: 
v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7] -; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0 -; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execnz .LBB58_1 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end -; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] main_body: %ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0) diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll index 15cda62..f2fe61f 100644 --- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll +++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll @@ -360,7 +360,8 @@ entry: ; s_add_i32. ; GCN-LABEL: {{^}}fi_sop2_s_add_u32_literal_error: -; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0, 0x2010 +; GCN: s_movk_i32 [[S_MOVK_I32_:s[0-9]+]], 0x1000 +; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0x1010, [[S_MOVK_I32_]] ; GCN: s_addc_u32 [[ADD_HI:s[0-9]+]], s{{[0-9]+}}, 0 define amdgpu_kernel void @fi_sop2_s_add_u32_literal_error() #0 { entry: diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir index c7767cb8..b53bde6 100644 --- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir +++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir @@ -20,11 +20,32 @@ ret void } + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2() #0 { + ret void + } + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_src2_different_subreg() #0 { ret void } + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first() #1 { + ret void + } + + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second() #1 { + ret void + } + + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg() #1 { + ret void + } + + define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg() #1 { + ret void + } + attributes #0 = { "amdgpu-wave-limiter"="true" "amdgpu-waves-per-eu"="8,8" } + attributes #1 = { "amdgpu-wave-limiter"="true" "amdgpu-waves-per-eu"="10,10" } ... # Inflate pattern, except the defining instruction isn't an MFMA. @@ -403,6 +424,89 @@ body: | ... 
+# Non-mac variant, src2 is a physical register +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2 +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2 + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; 
CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + # Non-mac variant, src2 is the same VGPR, but a different subregister. --- name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_src2_different_subreg @@ -489,3 +593,423 @@ body: | S_ENDPGM 0 ... + +# There isn't an assignable AGPR around the first MFMA. 
+--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def 
$vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2 + %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 or %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, 
addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# There isn't an assignable AGPR around the second MFMA. +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable 
$agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9 + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 or %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def 
$vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# There isn't an assignable AGPR around the first MFMA, with physreg interference +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + ; CHECK-NEXT: S_NOP 0, implicit 
$agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + ; CHECK-NEXT: S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + ; CHECK-NEXT: S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + ; CHECK-NEXT: S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + ; CHECK-NEXT: S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + ; CHECK-NEXT: S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef 
%2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 or %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... 
+ +# There isn't an assignable AGPR around the second MFMA, physreg interference +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + ; CHECK-NEXT: S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + ; CHECK-NEXT: S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + ; CHECK-NEXT: S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + ; CHECK-NEXT: S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + ; CHECK-NEXT: S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + ; CHECK-NEXT: S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + 
; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 + S_NOP 0, implicit 
$agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23 + S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 + S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39 + S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47 + S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55 + S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 or %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir index b907c13..b59f2de 100644 --- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir +++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir @@ -445,6 +445,86 @@ body: | ... 
+ +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_two_chained_uses_cannot_rewrite_final_use +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_two_chained_uses_cannot_rewrite_final_use + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, killed $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* 
sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2 + S_ENDPGM 0 + +... + # There is a rewrite candidate, but it is used by another MFMA which # does not have a tied result. --- @@ -619,10 +699,9 @@ body: | S_ENDPGM 0 ... - -# There isn't an assignable AGPR around the first MFMA. +# Chain of 2 untied cases, but the use isn't in src2. 
--- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2 tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -630,7 +709,7 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2 ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} @@ -647,10 +726,8 @@ body: | ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) - ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 - ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr4_vgpr5, $vgpr8_vgpr9, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} @@ -685,10 +762,8 @@ body: | liveins: $vcc undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) - S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2 %3:vreg_512_align2 = 
V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9 - %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %3.sub4_sub5, %3.sub8_sub9, undef %6:vreg_512_align2, 0, 0, 0, implicit $mode, implicit $exec S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 @@ -711,9 +786,10 @@ body: | ... -# There isn't an assignable AGPR around the second MFMA. +# Chain of 2 untied cases, but the second mfma is a different size and +# uses a subregister. --- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -721,7 +797,7 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} @@ -739,18 +815,16 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 - ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = V_MFMA_F32_16X16X16F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc 
; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF + ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3 ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 - ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 @@ -758,10 +832,7 @@ body: | ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) ; CHECK-NEXT: S_ENDPGM 0 bb.0: S_NOP 0, implicit-def $agpr0 @@ -777,9 +848,7 @@ body: | undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2 - %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec - S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9 + %4:vreg_128_align2 = V_MFMA_F32_16X16X16F16_vgprcd_e64 %1, %1, %3.sub2_sub3_sub4_sub5, 0, 0, 0, implicit $mode, implicit $exec S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 @@ -794,6 +863,229 @@ body: | S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + S_ENDPGM 0 + +... 
+ +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_local_split +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_local_split + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X8F16_mac_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), 
addrspace 1) + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# Performs a split and inflate around the single instruction +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable 
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X8F16_mac_e64 killed $vgpr2_vgpr3, $vgpr2_vgpr3, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 
0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# Performs a split and inflate around the single instruction, non-tied case +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $agpr0_agpr1 + ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, 
$vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %0 + S_NOP 0, implicit-def 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) @@ -802,9 +1094,11 @@ body: | ... -# Chain of 2 untied cases, but the use isn't in src2. +# This case does not fully use %0 after the MFMA. As a result, +# SplitKits insert a copy bundle for the subset of used lanes instead +# of a simple copy. --- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2 +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split_partial_uses_only tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -812,7 +1106,447 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2 + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split_partial_uses_only + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $agpr0_agpr1 + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from 
%stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 killed $vgpr2_vgpr3, $vgpr2_vgpr3, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY renamable $vgpr0_vgpr1_vgpr2_vgpr3 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = COPY renamable $agpr0_agpr1_agpr2_agpr3 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %0 + S_NOP 0, implicit-def 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + S_ENDPGM 0 + +... + +# Untied version of previous. This case does not fully use %4 after +# the MFMA. As a result, SplitKits insert a copy bundle for the subset +# of used lanes instead of a simple copy, +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split_partial_uses_only +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + bb.0: + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split_partial_uses_only + ; CHECK: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $agpr0_agpr1 + ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 
killed $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY renamable $vgpr2_vgpr3_vgpr4_vgpr5 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = COPY renamable $agpr0_agpr1_agpr2_agpr3 + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + ; No VGPRs available for %4 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def 
$vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + S_ENDPGM 0 + +... + +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr10 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: renamable $vgpr11 = COPY renamable $vgpr10 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) + ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) + ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) + ; CHECK-NEXT: renamable $vgpr14_vgpr15_vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) + ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable 
$vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_1024_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + %0.sub0_sub1_sub2_sub3:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) + %0.sub4_sub5_sub6_sub7:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) + %0.sub8_sub9_sub10_sub11:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) + %0.sub12_sub13_sub14_sub15:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) + %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15:vreg_1024_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + 
S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +--- +name: chained_mfma_dst_user_is_vgpr +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: chained_mfma_dst_user_is_vgpr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 + ; CHECK-NEXT: S_NOP 0, implicit-def 
$vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + successors: %bb.1(0x80000000) + + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + successors: %bb.1(0x40000000), %bb.2(0x40000000) + liveins: $vcc + + undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec + early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %4 + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit 
$exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# TODO: In this trivial case, the single copy required is cheaper than +# the tuple copy. +--- +name: chained_mfma_dst_user_is_vgpr_small_subreg +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: chained_mfma_dst_user_is_vgpr_small_subreg + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: dead renamable $vgpr0 = nofpexcept V_CVT_F16_F32_e32 killed $vgpr0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR 
renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + successors: %bb.1(0x80000000) + + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + successors: %bb.1(0x40000000), %bb.2(0x40000000) + liveins: $vcc + + undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec + early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %5:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 %4.sub0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... 
+ +# Transitive user of the register is an MFMA with non-register src2 +--- +name: chained_mfma_dst_user_has_imm_src2 +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: chained_mfma_dst_user_has_imm_src2 ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} @@ -830,7 +1564,8 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr4_vgpr5, $vgpr8_vgpr9, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr20_vgpr21_vgpr22_vgpr23 = V_MFMA_F32_4X4X4F16_vgprcd_e64 $vgpr20_vgpr21, $vgpr18_vgpr19, 0, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} @@ -853,6 +1588,8 @@ body: | ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: S_ENDPGM 0 bb.0: + successors: %bb.1(0x80000000) + S_NOP 0, implicit-def $agpr0 renamable $sgpr0 = S_MOV_B32 0 undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec @@ -862,16 +1599,104 @@ body: | %0.sub9:vreg_512_align2 = COPY %0.sub8 bb.1: + successors: %bb.1(0x40000000), %bb.2(0x40000000) liveins: $vcc - undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) - %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %3.sub4_sub5, %3.sub8_sub9, undef %6:vreg_512_align2, 0, 0, 0, implicit $mode, implicit $exec + undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec + early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %4.sub0_sub1_sub2_sub3:vreg_512_align2 = V_MFMA_F32_4X4X4F16_vgprcd_e64 %4.sub0_sub1, %1, 0, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 bb.2: - ; No VGPRs available for %0 or %4 + S_NOP 0, 
implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + 
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 @@ -889,10 +1714,8 @@ body: | ... -# Chain of 2 untied cases, but the second mfma is a different size and -# uses a subregister. 
--- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user_with_vgpr_use tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -900,7 +1723,7 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user_with_vgpr_use ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} @@ -908,26 +1731,27 @@ body: | ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 - ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1 ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19 + ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) - ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = V_MFMA_F32_16X16X16F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3 + ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable 
$vgpr0_vgpr1_vgpr2_vgpr3 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 @@ -935,7 +1759,10 @@ body: | ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec - ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: S_ENDPGM 0 bb.0: S_NOP 0, implicit-def $agpr0 @@ -949,14 +1776,15 @@ body: | bb.1: liveins: $vcc - undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) - %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - %4:vreg_128_align2 = V_MFMA_F32_16X16X16F16_vgprcd_e64 %1, %1, %3.sub2_sub3_sub4_sub5, 0, 0, 0, implicit $mode, implicit $exec + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %4 S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 bb.2: - ; No VGPRs available for %0 or %4 + ; No VGPRs available for %0 S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 @@ -966,13 +1794,16 @@ body: | S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec - GLOBAL_STORE_DWORDX4_SADDR %5, %4, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: 
(store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) S_ENDPGM 0 ... --- -name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_has_tied_user tracksRegLiveness: true machineFunctionInfo: isEntryFunction: true @@ -980,32 +1811,115 @@ machineFunctionInfo: occupancy: 10 sgprForEXECCopy: '$sgpr100_sgpr101' body: | - ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_has_tied_user ; CHECK: bb.0: ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 - ; CHECK-NEXT: renamable $vgpr10 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = COPY killed renamable $sgpr0_sgpr1 + ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $vcc, $vgpr2_vgpr3 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) + ; CHECK-NEXT: early-clobber renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr2_vgpr3, $vgpr2_vgpr3, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr2_vgpr3, $vgpr2_vgpr3, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc + ; CHECK-NEXT: S_BRANCH %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: liveins: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19:0x00000000FFFFFFFF + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: S_NOP 0, implicit-def 
$vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + ; CHECK-NEXT: S_ENDPGM 0 + bb.0: + S_NOP 0, implicit-def $agpr0 + renamable $sgpr0 = S_MOV_B32 0 + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec + renamable $sgpr1 = COPY renamable $sgpr0 + %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 + renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc + %0.sub9:vreg_512_align2 = COPY %0.sub8 + + bb.1: + liveins: $vcc + + undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec + %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %4, 0, 0, 0, implicit $mode, implicit $exec + S_CBRANCH_VCCNZ %bb.1, implicit $vcc + S_BRANCH %bb.2 + + bb.2: + ; No VGPRs available for %0 + S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 + S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23 + S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 + S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39 + S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 + S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 + S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1) + GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1) + S_ENDPGM 0 + +... + +# Non-mac variant, src2 is an immediate. 
+--- +name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_imm_src2 +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true + stackPtrOffsetReg: '$sgpr32' + occupancy: 10 + sgprForEXECCopy: '$sgpr100_sgpr101' +body: | + ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_imm_src2 + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0 + ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0 + ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0 ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1 ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc - ; CHECK-NEXT: renamable $vgpr11 = COPY renamable $vgpr10 + ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: renamable $vgpr14_vgpr15_vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, 0, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: - ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF + ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 @@ -1025,20 +1939,16 @@ body: | bb.0: S_NOP 0, implicit-def $agpr0 renamable $sgpr0 = S_MOV_B32 0 - undef %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 0, implicit $exec + undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec renamable $sgpr1 = COPY renamable $sgpr0 %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1 renamable $vcc = S_AND_B64 $exec, -1, 
implicit-def dead $scc - %0.sub9:vreg_1024_align2 = COPY %0.sub8 + %0.sub9:vreg_512_align2 = COPY %0.sub8 bb.1: liveins: $vcc - %0.sub0_sub1_sub2_sub3:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) - %0.sub4_sub5_sub6_sub7:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) - %0.sub8_sub9_sub10_sub11:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) - %0.sub12_sub13_sub14_sub15:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1) - %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15:vreg_1024_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, 0, 0, 0, implicit $mode, implicit $exec + %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, 0, 0, 0, 0, implicit $mode, implicit $exec S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 diff --git a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll index 1c298014..3001248 100644 --- a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll +++ b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll @@ -6,16 +6,24 @@ define amdgpu_gfx [13 x i32] @issue130120() { ; CHECK: ; %bb.0: ; %bb ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; CHECK-NEXT: v_mov_b32_e32 v0, 0 -; CHECK-NEXT: s_add_i32 s0, s32, 0xf0 -; CHECK-NEXT: s_add_i32 s1, s32, 0xf4 -; CHECK-NEXT: s_add_i32 s2, s32, 0xf8 -; CHECK-NEXT: s_add_i32 s3, s32, 0xfc +; CHECK-NEXT: s_movk_i32 s1, 0xf4 +; CHECK-NEXT: s_movk_i32 s2, 0xf8 +; CHECK-NEXT: s_movk_i32 s3, 0xfc +; CHECK-NEXT: s_movk_i32 s34, 0x100 ; CHECK-NEXT: v_mov_b32_e32 v1, v0 -; CHECK-NEXT: s_add_i32 s34, s32, 0x100 -; CHECK-NEXT: s_add_i32 s35, s32, 0x104 -; CHECK-NEXT: s_add_i32 s36, s32, 0x108 -; CHECK-NEXT: s_add_i32 s37, s32, 0x110 -; CHECK-NEXT: s_add_i32 s38, s32, 0x120 +; CHECK-NEXT: s_movk_i32 s35, 0x104 +; CHECK-NEXT: s_movk_i32 s36, 0x108 +; CHECK-NEXT: s_movk_i32 s37, 0x110 +; CHECK-NEXT: s_movk_i32 s38, 0x120 +; CHECK-NEXT: s_add_i32 s0, s32, 0xf0 +; CHECK-NEXT: s_add_i32 s1, s32, s1 +; CHECK-NEXT: s_add_i32 s2, s32, s2 +; CHECK-NEXT: s_add_i32 s3, s32, s3 +; CHECK-NEXT: s_add_i32 s34, s32, s34 +; CHECK-NEXT: s_add_i32 s35, s32, s35 +; CHECK-NEXT: s_add_i32 s36, s32, s36 +; CHECK-NEXT: s_add_i32 s37, s32, s37 +; CHECK-NEXT: s_add_i32 s38, s32, s38 ; CHECK-NEXT: s_or_b32 s39, s32, 4 ; CHECK-NEXT: s_or_b32 s40, s32, 8 ; CHECK-NEXT: s_or_b32 s41, s32, 12 diff --git a/llvm/test/CodeGen/AMDGPU/literal64.ll b/llvm/test/CodeGen/AMDGPU/literal64.ll index 768c972..98691d3 100644 --- a/llvm/test/CodeGen/AMDGPU/literal64.ll +++ b/llvm/test/CodeGen/AMDGPU/literal64.ll @@ -67,24 +67,8 @@ define void @v_mov_b64_double(ptr addrspace(1) %ptr) { ; GCN: ; %bb.0: ; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: global_load_b64 v[4:5], v[0:1], off -; GCN-NEXT: s_mov_b32 s0, 0 -; GCN-NEXT: .LBB6_1: ; %atomicrmw.start -; GCN-NEXT: ; =>This Inner Loop Header: Depth=1 -; GCN-NEXT: s_wait_loadcnt 0x0 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_add_f64_e32 v[2:3], lit64(0x4063233333333333), v[4:5] -; GCN-NEXT: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GCN-NEXT: 
s_wait_loadcnt 0x0 -; GCN-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5] -; GCN-NEXT: s_wait_xcnt 0x0 -; GCN-NEXT: v_mov_b64_e32 v[4:5], v[2:3] -; GCN-NEXT: s_or_b32 s0, vcc_lo, s0 -; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GCN-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; GCN-NEXT: s_cbranch_execnz .LBB6_1 -; GCN-NEXT: ; %bb.2: ; %atomicrmw.end -; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333) +; GCN-NEXT: global_atomic_add_f64 v[0:1], v[2:3], off scope:SCOPE_SYS ; GCN-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fadd ptr addrspace(1) %ptr, double 153.1 monotonic ret void diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll index 1c7c625..1bf865c 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll @@ -2236,6 +2236,170 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v40, v41 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[42:43], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[42:43], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[42:43], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[42:43], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v40, v41 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[42:43], v[24:27], off +; GISEL-NEXT: global_store_b128 v[42:43], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[42:43], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[42:43], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 1, i32 0, i32 %scale_src0, i32 1, i32 0, i32 %scale_src1, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_ss(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 inreg %scale_src0, i32 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_ss: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_ss: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_b_scale:MATRIX_SCALE_ROW1 
matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 2, i32 1, i32 %scale_src0, i32 1, i32 2, i32 %scale_src1, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_si_scale(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 inreg %scale_src0, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_si_scale: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: s_movk_i32 s1, 0x64 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_si_scale: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_mov_b32_e32 v42, 0x64 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, v42 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 3, i32 2, i32 %scale_src0, i32 0, i32 1, i32 100, i1 false, i1 true) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v[40:41], v[42:43] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[44:45], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[44:45], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[44:45], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[44:45], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v[40:41], v[42:43] matrix_a_scale:MATRIX_SCALE_ROW1 
matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[44:45], v[24:27], off +; GISEL-NEXT: global_store_b128 v[44:45], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[44:45], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[44:45], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 1, i32 0, i64 %scale_src0, i32 1, i32 0, i64 %scale_src1, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_ss(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 inreg %scale_src0, i64 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_ss: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_ss: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 2, i32 1, i64 %scale_src0, i32 1, i32 2, i64 %scale_src1, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_si_scale(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 inreg %scale_src0, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_si_scale: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: s_mov_b64 s[2:3], 0x64 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_si_scale: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_mov_b64_e32 v[42:43], 0x64 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], v[42:43] 
matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 3, i32 2, i64 %scale_src0, i32 0, i32 1, i64 100, i1 false, i1 true) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + define amdgpu_ps void @test_swmmac_f32_16x16x64_bf16(<16 x bfloat> %A, <32 x bfloat> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) { ; GFX1250-LABEL: test_swmmac_f32_16x16x64_bf16: ; GFX1250: ; %bb.0: ; %bb @@ -2573,6 +2737,8 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>) +declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, i32, i64, i1, i1) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x64.bf16.v8f32.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x float>, i16, i1, i1) declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x64.bf16.v8bf16.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x bfloat>, i16, i1, i1) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll index e602c31..48303c0 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll @@ -2530,6 +2530,312 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, i32 inreg %scale_src0, i32 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], 
off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i32 %scale_src0, i32 1, i32 0, i32 %scale_src1, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_non_splat(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_non_splat: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_dual_mov_b32 v26, 1.0 :: v_dual_mov_b32 v27, 2.0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v28, v26 :: v_dual_mov_b32 v29, v26 +; GFX1250-NEXT: v_dual_mov_b32 v30, v26 :: v_dual_mov_b32 v31, v26 +; GFX1250-NEXT: v_dual_mov_b32 v32, v26 :: v_dual_mov_b32 v33, v26 +; GFX1250-NEXT: v_dual_mov_b32 v34, v26 :: v_dual_mov_b32 v35, v26 +; GFX1250-NEXT: v_dual_mov_b32 v36, v26 :: v_dual_mov_b32 v37, v26 +; GFX1250-NEXT: v_dual_mov_b32 v38, v26 :: v_dual_mov_b32 v39, v26 +; GFX1250-NEXT: v_dual_mov_b32 v40, v26 :: v_dual_mov_b32 v41, v26 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_non_splat: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 1.0 +; GISEL-NEXT: s_mov_b32 s1, 2.0 +; GISEL-NEXT: s_mov_b32 s14, s0 +; GISEL-NEXT: s_mov_b32 s15, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s8, s0 +; GISEL-NEXT: s_mov_b32 s9, s0 +; GISEL-NEXT: s_mov_b32 s10, s0 +; GISEL-NEXT: s_mov_b32 s11, s0 +; GISEL-NEXT: s_mov_b32 s12, s0 +; GISEL-NEXT: s_mov_b32 s13, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9] +; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1] +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, 
float 2.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i32 1, i32 1, i32 0, i32 2, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_non_inlineable(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_non_inlineable: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_mov_b32_e32 v26, 0x40400000 +; GFX1250-NEXT: s_movk_i32 s0, 0x65 +; GFX1250-NEXT: s_movk_i32 s1, 0x64 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v27, v26 :: v_dual_mov_b32 v28, v26 +; GFX1250-NEXT: v_dual_mov_b32 v29, v26 :: v_dual_mov_b32 v30, v26 +; GFX1250-NEXT: v_dual_mov_b32 v31, v26 :: v_dual_mov_b32 v32, v26 +; GFX1250-NEXT: v_dual_mov_b32 v33, v26 :: v_dual_mov_b32 v34, v26 +; GFX1250-NEXT: v_dual_mov_b32 v35, v26 :: v_dual_mov_b32 v36, v26 +; GFX1250-NEXT: v_dual_mov_b32 v37, v26 :: v_dual_mov_b32 v38, v26 +; GFX1250-NEXT: v_dual_mov_b32 v39, v26 :: v_dual_mov_b32 v40, v26 +; GFX1250-NEXT: v_mov_b32_e32 v41, v26 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], s1, s0 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_non_inlineable: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 0x40400000 +; GISEL-NEXT: v_mov_b32_e32 v42, 0x64 +; GISEL-NEXT: s_mov_b32 s14, s0 +; GISEL-NEXT: s_mov_b32 s15, s0 +; GISEL-NEXT: s_mov_b32 s1, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s8, s0 +; GISEL-NEXT: s_mov_b32 s9, s0 +; GISEL-NEXT: s_mov_b32 s10, s0 +; GISEL-NEXT: s_mov_b32 s11, s0 +; GISEL-NEXT: s_mov_b32 s12, s0 +; GISEL-NEXT: s_mov_b32 s13, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9] +; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1] +; GISEL-NEXT: v_mov_b32_e32 v43, 0x65 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], v42, v43 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, 
float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>, i32 1, i32 0, i32 100, i32 1, i32 0, i32 101, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, i64 inreg %scale_src0, i64 inreg %scale_src1, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i64 %scale_src0, i32 1, i32 0, i64 %scale_src1, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_non_splat(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_splat: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_dual_mov_b32 v26, 1.0 :: v_dual_mov_b32 v27, 2.0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v28, v26 :: v_dual_mov_b32 v29, v26 +; GFX1250-NEXT: v_dual_mov_b32 v30, v26 :: v_dual_mov_b32 v31, v26 +; GFX1250-NEXT: v_dual_mov_b32 v32, v26 :: v_dual_mov_b32 v33, v26 +; GFX1250-NEXT: v_dual_mov_b32 v34, v26 :: v_dual_mov_b32 v35, v26 +; GFX1250-NEXT: v_dual_mov_b32 v36, v26 :: v_dual_mov_b32 v37, v26 +; GFX1250-NEXT: v_dual_mov_b32 v38, v26 :: v_dual_mov_b32 v39, v26 +; GFX1250-NEXT: v_dual_mov_b32 v40, v26 :: v_dual_mov_b32 v41, v26 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_splat: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 1.0 +; GISEL-NEXT: s_mov_b32 s1, 2.0 +; GISEL-NEXT: s_mov_b32 s14, s0 +; 
GISEL-NEXT: s_mov_b32 s15, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s8, s0 +; GISEL-NEXT: s_mov_b32 s9, s0 +; GISEL-NEXT: s_mov_b32 s10, s0 +; GISEL-NEXT: s_mov_b32 s11, s0 +; GISEL-NEXT: s_mov_b32 s12, s0 +; GISEL-NEXT: s_mov_b32 s13, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9] +; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1] +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 2.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i64 1, i32 1, i32 0, i64 2, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_non_inlineable(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_inlineable: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_mov_b32_e32 v26, 0x40400000 +; GFX1250-NEXT: s_mov_b64 s[0:1], 0x65 +; GFX1250-NEXT: s_mov_b64 s[2:3], 0x64 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_dual_mov_b32 v27, v26 :: v_dual_mov_b32 v28, v26 +; GFX1250-NEXT: v_dual_mov_b32 v29, v26 :: v_dual_mov_b32 v30, v26 +; GFX1250-NEXT: v_dual_mov_b32 v31, v26 :: v_dual_mov_b32 v32, v26 +; GFX1250-NEXT: v_dual_mov_b32 v33, v26 :: v_dual_mov_b32 v34, v26 +; GFX1250-NEXT: v_dual_mov_b32 v35, v26 :: v_dual_mov_b32 v36, v26 +; GFX1250-NEXT: v_dual_mov_b32 v37, v26 :: v_dual_mov_b32 v38, v26 +; GFX1250-NEXT: v_dual_mov_b32 v39, v26 :: v_dual_mov_b32 v40, v26 +; GFX1250-NEXT: v_mov_b32_e32 v41, v26 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], s[2:3], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_inlineable: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: s_mov_b32 s0, 0x40400000 +; GISEL-NEXT: v_mov_b64_e32 v[42:43], 0x64 +; GISEL-NEXT: s_mov_b32 s14, s0 +; GISEL-NEXT: s_mov_b32 s15, s0 +; GISEL-NEXT: s_mov_b32 s1, s0 +; GISEL-NEXT: s_mov_b32 s2, s0 +; GISEL-NEXT: 
s_mov_b32 s3, s0 +; GISEL-NEXT: s_mov_b32 s4, s0 +; GISEL-NEXT: s_mov_b32 s5, s0 +; GISEL-NEXT: s_mov_b32 s6, s0 +; GISEL-NEXT: s_mov_b32 s7, s0 +; GISEL-NEXT: s_mov_b32 s8, s0 +; GISEL-NEXT: s_mov_b32 s9, s0 +; GISEL-NEXT: s_mov_b32 s10, s0 +; GISEL-NEXT: s_mov_b32 s11, s0 +; GISEL-NEXT: s_mov_b32 s12, s0 +; GISEL-NEXT: s_mov_b32 s13, s0 +; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15] +; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9] +; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7] +; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5] +; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1] +; GISEL-NEXT: v_mov_b64_e32 v[44:45], 0x65 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], v[42:43], v[44:45] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off +; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16 +; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32 +; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>, i32 1, i32 0, i64 100, i32 1, i32 0, i64 101, i1 true, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x4.f32.v8f32.v2f32(i1, <2 x float>, i1, <2 x float>, i16, <8 x float>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.bf16.v8f32.v16bf16(i1, <16 x bfloat>, i1, <16 x bfloat>, i16, <8 x float>, i1, i1) declare <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x32.bf16.v8bf16.v16bf16(i1, <16 x bfloat>, i1, <16 x bfloat>, i16, <8 x bfloat>, i1, i1) @@ -2557,3 +2863,5 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>) +declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, i32, i64, i1, i1) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll index 14699ce..8f674f8 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll @@ -1882,6 +1882,162 @@ bb: ret void } +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_negC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_negC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 
v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_negC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 1, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_neg_absC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_neg_absC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_neg_absC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 3, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_ignoreC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_ignoreC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; 
GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_ignoreC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 4, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_negC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_negC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_negC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 1, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_neg_absC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_neg_absC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_neg_absC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], 
v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 3, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + +define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_ignoreC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) { +; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_ignoreC: +; GFX1250: ; %bb.0: ; %bb +; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GFX1250-NEXT: s_clause 0x3 +; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off +; GFX1250-NEXT: s_endpgm +; +; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_ignoreC: +; GISEL: ; %bb.0: ; %bb +; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 +; GISEL-NEXT: s_clause 0x3 +; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off +; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16 +; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32 +; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48 +; GISEL-NEXT: s_endpgm +bb: + %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 4, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false) + store <16 x float> %res, ptr addrspace(1) %out + ret void +} + define amdgpu_ps void @test_swmmac_f32_16x16x64_bf16_negA(<16 x bfloat> %A, <32 x bfloat> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) { ; GFX1250-LABEL: test_swmmac_f32_16x16x64_bf16_negA: ; GFX1250: ; %bb.0: ; %bb @@ -2177,6 +2333,8 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1) declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>) +declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1) +declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, i32, i64, i1, i1) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x64.bf16.v8f32.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x float>, i16, i1, i1) declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x64.bf16.v8bf16.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x bfloat>, i16, i1, i1) diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll index a3ebaec..5f0ca7b 100644 --- 
a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll +++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll @@ -74,7 +74,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp(ptr addrspace(1) %out) { ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_movk_i32 s0, 0x5000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x2000 +; FLATSCR-NEXT: s_addk_i32 s0, 0x3000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[0:1], off, s0 offset:208 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_movk_i32 s0, 0x3000 @@ -175,7 +176,9 @@ define void @func_local_stack_offset_uses_sp(ptr addrspace(1) %out) { ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_cbranch_scc1 .LBB1_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_add_i32 s0, s33, 0x5000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x2000 +; FLATSCR-NEXT: s_add_i32 s1, s33, s0 +; FLATSCR-NEXT: s_add_i32 s0, s1, 0x3000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[2:3], off, s0 offset:208 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_add_i32 s0, s33, 0x3000 @@ -223,30 +226,35 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: s_cbranch_scc1 .LBB2_1 ; MUBUF-NEXT: ; %bb.2: ; %split +; MUBUF-NEXT: s_movk_i32 s5, 0x12d4 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d4, v1 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 +; MUBUF-NEXT: s_movk_i32 s5, 0x12d0 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 ; MUBUF-NEXT: s_movk_i32 s4, 0x4000 ; MUBUF-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d0, v1 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 +; MUBUF-NEXT: s_movk_i32 s5, 0x12c4 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000 ; MUBUF-NEXT: s_or_b32 s4, s4, 0x12c0 ; MUBUF-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v0, 0x12c4, v1 -; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000 +; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1 ; MUBUF-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: v_mov_b32_e32 v0, s4 -; MUBUF-NEXT: v_or_b32_e32 v2, 0x12cc, v3 +; MUBUF-NEXT: s_movk_i32 s4, 0x12cc +; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000 +; MUBUF-NEXT: v_or_b32_e32 v2, s4, v3 +; MUBUF-NEXT: s_movk_i32 s4, 0x12c8 ; MUBUF-NEXT: v_mov_b32_e32 v6, 0x4000 ; MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) ; MUBUF-NEXT: v_mov_b32_e32 v7, 0x4000 ; MUBUF-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen glc ; MUBUF-NEXT: s_waitcnt vmcnt(0) -; MUBUF-NEXT: v_or_b32_e32 v2, 0x12c8, v6 +; MUBUF-NEXT: v_or_b32_e32 v2, s4, v6 ; MUBUF-NEXT: v_mov_b32_e32 v8, 0x4000 ; MUBUF-NEXT: v_mov_b32_e32 v9, 0x4000 ; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen glc @@ -298,7 +306,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: s_cbranch_scc1 .LBB2_1 ; FLATSCR-NEXT: ; %bb.2: ; %split -; FLATSCR-NEXT: s_movk_i32 s0, 0x3000 +; FLATSCR-NEXT: s_movk_i32 s0, 0x1000 +; FLATSCR-NEXT: s_addk_i32 s0, 0x2000 ; FLATSCR-NEXT: scratch_load_dwordx2 v[8:9], off, s0 offset:720 glc ; FLATSCR-NEXT: s_waitcnt vmcnt(0) ; FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 offset:704 glc diff --git a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll new file mode 100644 index 
0000000..6d0aa1e --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll @@ -0,0 +1,108 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s + +define protected amdgpu_kernel void @no_folding_imm_to_inst_with_fi(<4 x i64> %val4, <16 x i64> %val16) { +; CHECK-LABEL: no_folding_imm_to_inst_with_fi: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: s_clause 0x2 +; CHECK-NEXT: s_load_b256 s[36:43], s[4:5], 0x24 +; CHECK-NEXT: s_load_b512 s[16:31], s[4:5], 0xe4 +; CHECK-NEXT: s_load_b512 s[0:15], s[4:5], 0xa4 +; CHECK-NEXT: s_mov_b64 s[34:35], src_private_base +; CHECK-NEXT: s_movk_i32 s33, 0x70 +; CHECK-NEXT: s_movk_i32 s34, 0x60 +; CHECK-NEXT: s_or_b32 s44, 0x80, s33 +; CHECK-NEXT: s_mov_b32 s45, s35 +; CHECK-NEXT: s_or_b32 s46, 0x80, s34 +; CHECK-NEXT: s_mov_b32 s47, s35 +; CHECK-NEXT: v_dual_mov_b32 v20, s44 :: v_dual_mov_b32 v21, s45 +; CHECK-NEXT: v_dual_mov_b32 v22, s46 :: v_dual_mov_b32 v23, s47 +; CHECK-NEXT: s_movk_i32 s34, 0x80 +; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; CHECK-NEXT: v_dual_mov_b32 v34, s34 :: v_dual_mov_b32 v35, s35 +; CHECK-NEXT: s_wait_kmcnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v1, s41 +; CHECK-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43 +; CHECK-NEXT: v_dual_mov_b32 v4, s36 :: v_dual_mov_b32 v5, s37 +; CHECK-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v7, s39 +; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v1, s21 +; CHECK-NEXT: s_movk_i32 s20, 0x50 +; CHECK-NEXT: v_dual_mov_b32 v8, s28 :: v_dual_mov_b32 v9, s29 +; CHECK-NEXT: v_dual_mov_b32 v10, s30 :: v_dual_mov_b32 v11, s31 +; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: s_or_b32 s20, 0x80, s20 +; CHECK-NEXT: s_mov_b32 s21, s35 +; CHECK-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25 +; CHECK-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27 +; CHECK-NEXT: v_dual_mov_b32 v2, s22 :: v_dual_mov_b32 v3, s23 +; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: v_dual_mov_b32 v25, s21 :: v_dual_mov_b32 v24, s20 +; CHECK-NEXT: scratch_store_b128 off, v[4:7], off scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[22:23], v[12:15] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[24:25], v[0:3] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17 +; CHECK-NEXT: s_or_b32 s16, 0x80, 64 +; CHECK-NEXT: s_mov_b32 s17, s35 +; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13 +; CHECK-NEXT: s_or_b32 s12, 0x80, 48 +; CHECK-NEXT: s_mov_b32 s13, s35 +; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 +; CHECK-NEXT: s_or_b32 s8, 0x80, 32 +; CHECK-NEXT: s_mov_b32 s9, s35 +; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5 +; CHECK-NEXT: s_or_b32 s4, 0x80, 16 +; CHECK-NEXT: s_mov_b32 s5, s35 +; CHECK-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v3, s19 +; CHECK-NEXT: s_wait_alu 0xfffe +; CHECK-NEXT: v_dual_mov_b32 v27, s17 :: v_dual_mov_b32 v26, s16 +; CHECK-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s15 +; CHECK-NEXT: v_dual_mov_b32 v29, s13 :: v_dual_mov_b32 v28, s12 +; CHECK-NEXT: v_dual_mov_b32 v31, s9 :: v_dual_mov_b32 v30, s8 +; CHECK-NEXT: v_dual_mov_b32 v33, s5 
:: v_dual_mov_b32 v32, s4 +; CHECK-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 +; CHECK-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s7 +; CHECK-NEXT: v_dual_mov_b32 v16, s0 :: v_dual_mov_b32 v17, s1 +; CHECK-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v19, s3 +; CHECK-NEXT: flat_store_b128 v[26:27], v[0:3] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[28:29], v[4:7] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[30:31], v[8:11] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[32:33], v[12:15] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_store_b128 v[34:35], v[16:19] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[22:23] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[26:27] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[24:25] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[30:31] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[28:29] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[34:35] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 +; CHECK-NEXT: flat_load_b128 v[0:3], v[32:33] scope:SCOPE_SYS +; CHECK-NEXT: s_wait_loadcnt 0x0 +; CHECK-NEXT: s_endpgm +bb: + %alloca = alloca <4 x i64>, align 32, addrspace(5) + %alloca1 = alloca <16 x i64>, align 128, addrspace(5) + store volatile <4 x i64> %val4, ptr addrspace(5) %alloca + %ascast = addrspacecast ptr addrspace(5) %alloca1 to ptr + store volatile <16 x i64> %val16, ptr %ascast + %load = load volatile <16 x i64>, ptr %ascast + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll b/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll index 131c5f3..f67cbe3 100644 --- a/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll +++ b/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll @@ -10,6 +10,8 @@ ; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GETREG,GETREG-GISEL -check-prefix=GCN %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GCN,GFX12 %s ; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GCN,GFX12 %s +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250 %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250 %s declare i64 @llvm.readcyclecounter() #0 @@ -21,6 +23,7 @@ declare i64 @llvm.readcyclecounter() #0 ; GFX12: s_getreg_b32 [[HI2:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI) ; GFX12: s_cmp_eq_u32 [[HI1]], [[HI2]] ; GFX12: s_cselect_b32 {{s[0-9]+}}, [[LO1]], 0 +; GFX1250: s_get_shader_cycles_u64 s{{\[[0-9]+:[0-9]+\]}} ; GCN-DAG: kmcnt ; MEMTIME: store_dwordx2 ; SIVI-NOT: kmcnt @@ -53,6 +56,7 @@ define amdgpu_kernel void @test_readcyclecounter(ptr addrspace(1) %out) #0 { ; GFX12: s_getreg_b32 [[HI1:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI) ; GFX12: s_getreg_b32 [[LO1:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_LO) ; GFX12: s_getreg_b32 [[HI2:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI) +; GFX1250: s_get_shader_cycles_u64 s{{\[[0-9]+:[0-9]+\]}} ; GCN-DAG: 
s_load_{{dword|b32|b64}} ; GETREG-DAG: s_getreg_b32 s{{[0-9]+}}, hwreg(HW_REG_SHADER_CYCLES, 0, 20) ; GFX12: s_cmp_eq_u32 [[HI1]], [[HI2]] diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll index 0c6339e..0b43ff2 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mcpu=gfx90a < %s | FileCheck %s +; RUN: llc -mcpu=gfx942 -amdgpu-mfma-vgpr-form < %s | FileCheck %s target triple = "amdgcn-amd-amdhsa" @@ -7,7 +7,10 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp ; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma: ; CHECK: ; %bb.0: ; %bb ; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 +; CHECK-NEXT: v_mov_b32_e32 v32, 1.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 2.0 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: global_load_dwordx4 v[28:31], v0, s[0:1] offset:112 ; CHECK-NEXT: global_load_dwordx4 v[24:27], v0, s[0:1] offset:96 @@ -18,117 +21,58 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp ; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[0:1] offset:16 ; CHECK-NEXT: s_nop 0 ; CHECK-NEXT: global_load_dwordx4 v[0:3], v0, s[0:1] +; CHECK-NEXT: v_accvgpr_write_b32 a0, 1.0 +; CHECK-NEXT: v_accvgpr_write_b32 a1, 2.0 ; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_accvgpr_write_b32 a0, v0 -; CHECK-NEXT: v_accvgpr_write_b32 a1, v1 -; CHECK-NEXT: v_accvgpr_write_b32 a2, v2 -; CHECK-NEXT: v_accvgpr_write_b32 a3, v3 -; CHECK-NEXT: v_accvgpr_write_b32 a4, v4 -; CHECK-NEXT: v_accvgpr_write_b32 a5, v5 -; CHECK-NEXT: v_accvgpr_write_b32 a6, v6 -; CHECK-NEXT: v_accvgpr_write_b32 a7, v7 -; CHECK-NEXT: v_accvgpr_write_b32 a8, v8 -; CHECK-NEXT: v_accvgpr_write_b32 a9, v9 -; CHECK-NEXT: v_accvgpr_write_b32 a10, v10 -; CHECK-NEXT: v_accvgpr_write_b32 a11, v11 -; CHECK-NEXT: v_accvgpr_write_b32 a12, v12 -; CHECK-NEXT: v_accvgpr_write_b32 a13, v13 -; CHECK-NEXT: v_accvgpr_write_b32 a14, v14 -; CHECK-NEXT: v_accvgpr_write_b32 a15, v15 -; CHECK-NEXT: v_accvgpr_write_b32 a16, v16 -; CHECK-NEXT: v_accvgpr_write_b32 a17, v17 -; CHECK-NEXT: v_accvgpr_write_b32 a18, v18 -; CHECK-NEXT: v_accvgpr_write_b32 a19, v19 -; CHECK-NEXT: v_accvgpr_write_b32 a20, v20 -; CHECK-NEXT: v_accvgpr_write_b32 a21, v21 -; CHECK-NEXT: v_accvgpr_write_b32 a22, v22 -; CHECK-NEXT: v_accvgpr_write_b32 a23, v23 -; CHECK-NEXT: v_accvgpr_write_b32 a24, v24 -; CHECK-NEXT: v_accvgpr_write_b32 a25, v25 -; CHECK-NEXT: v_accvgpr_write_b32 a26, v26 -; CHECK-NEXT: v_accvgpr_write_b32 a27, v27 -; CHECK-NEXT: v_accvgpr_write_b32 a28, v28 -; CHECK-NEXT: v_accvgpr_write_b32 a29, v29 -; CHECK-NEXT: v_accvgpr_write_b32 a30, v30 -; CHECK-NEXT: v_accvgpr_write_b32 a31, v31 -; CHECK-NEXT: v_mov_b32_e32 v0, 1.0 -; CHECK-NEXT: v_mov_b32_e32 v1, 2.0 -; CHECK-NEXT: s_nop 1 -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v0, v1, a[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[32:63], a0, a1, v[0:31] ; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: s_nop 7 -; CHECK-NEXT: s_nop 2 -; CHECK-NEXT: v_accvgpr_read_b32 v4, a59 -; CHECK-NEXT: v_accvgpr_read_b32 v5, a58 -; CHECK-NEXT: v_accvgpr_read_b32 v6, a57 -; CHECK-NEXT: 
v_accvgpr_read_b32 v7, a56 -; CHECK-NEXT: v_accvgpr_read_b32 v8, a55 -; CHECK-NEXT: v_accvgpr_read_b32 v9, a54 -; CHECK-NEXT: v_accvgpr_read_b32 v10, a53 -; CHECK-NEXT: v_accvgpr_read_b32 v11, a52 -; CHECK-NEXT: v_accvgpr_read_b32 v12, a51 -; CHECK-NEXT: v_accvgpr_read_b32 v13, a50 -; CHECK-NEXT: v_accvgpr_read_b32 v14, a49 -; CHECK-NEXT: v_accvgpr_read_b32 v15, a48 -; CHECK-NEXT: v_accvgpr_read_b32 v16, a47 -; CHECK-NEXT: v_accvgpr_read_b32 v17, a46 -; CHECK-NEXT: v_accvgpr_read_b32 v18, a45 -; CHECK-NEXT: v_accvgpr_read_b32 v19, a44 -; CHECK-NEXT: v_accvgpr_read_b32 v20, a43 -; CHECK-NEXT: v_accvgpr_read_b32 v21, a42 -; CHECK-NEXT: v_accvgpr_read_b32 v22, a41 -; CHECK-NEXT: v_accvgpr_read_b32 v23, a40 -; CHECK-NEXT: v_accvgpr_read_b32 v24, a39 -; CHECK-NEXT: v_accvgpr_read_b32 v25, a38 -; CHECK-NEXT: v_accvgpr_read_b32 v26, a37 -; CHECK-NEXT: v_accvgpr_read_b32 v27, a36 -; CHECK-NEXT: v_accvgpr_read_b32 v28, a35 -; CHECK-NEXT: v_accvgpr_read_b32 v29, a34 -; CHECK-NEXT: v_accvgpr_mov_b32 a2, a32 -; CHECK-NEXT: v_accvgpr_mov_b32 a3, a33 -; CHECK-NEXT: v_accvgpr_write_b32 a4, v29 -; CHECK-NEXT: v_accvgpr_write_b32 a5, v28 -; CHECK-NEXT: v_accvgpr_write_b32 a6, v27 -; CHECK-NEXT: v_accvgpr_write_b32 a7, v26 -; CHECK-NEXT: v_accvgpr_write_b32 a8, v25 -; CHECK-NEXT: v_accvgpr_write_b32 a9, v24 -; CHECK-NEXT: v_accvgpr_write_b32 a10, v23 -; CHECK-NEXT: v_accvgpr_write_b32 a11, v22 -; CHECK-NEXT: v_accvgpr_write_b32 a12, v21 -; CHECK-NEXT: v_accvgpr_write_b32 a13, v20 -; CHECK-NEXT: v_accvgpr_write_b32 a14, v19 -; CHECK-NEXT: v_accvgpr_write_b32 a15, v18 -; CHECK-NEXT: v_accvgpr_write_b32 a16, v17 -; CHECK-NEXT: v_accvgpr_write_b32 a17, v16 -; CHECK-NEXT: v_accvgpr_write_b32 a18, v15 -; CHECK-NEXT: v_accvgpr_write_b32 a19, v14 -; CHECK-NEXT: v_accvgpr_write_b32 a20, v13 -; CHECK-NEXT: v_accvgpr_write_b32 a21, v12 -; CHECK-NEXT: v_accvgpr_write_b32 a22, v11 -; CHECK-NEXT: v_accvgpr_write_b32 a23, v10 -; CHECK-NEXT: v_accvgpr_write_b32 a24, v9 -; CHECK-NEXT: v_accvgpr_write_b32 a25, v8 -; CHECK-NEXT: v_accvgpr_write_b32 a26, v7 -; CHECK-NEXT: v_accvgpr_write_b32 a27, v6 -; CHECK-NEXT: v_accvgpr_write_b32 a28, v5 -; CHECK-NEXT: v_accvgpr_write_b32 a29, v4 -; CHECK-NEXT: v_accvgpr_mov_b32 a30, a60 -; CHECK-NEXT: v_accvgpr_mov_b32 a31, a61 ; CHECK-NEXT: s_nop 1 -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mov_b32_e32 v0, 0 +; CHECK-NEXT: v_mov_b32_e32 v2, v32 +; CHECK-NEXT: v_mov_b32_e32 v3, v33 +; CHECK-NEXT: v_mov_b32_e32 v4, v34 +; CHECK-NEXT: v_mov_b32_e32 v5, v35 +; CHECK-NEXT: v_mov_b32_e32 v6, v36 +; CHECK-NEXT: v_mov_b32_e32 v7, v37 +; CHECK-NEXT: v_mov_b32_e32 v8, v38 +; CHECK-NEXT: v_mov_b32_e32 v9, v39 +; CHECK-NEXT: v_mov_b32_e32 v10, v40 +; CHECK-NEXT: v_mov_b32_e32 v11, v41 +; CHECK-NEXT: v_mov_b32_e32 v12, v42 +; CHECK-NEXT: v_mov_b32_e32 v13, v43 +; CHECK-NEXT: v_mov_b32_e32 v14, v44 +; CHECK-NEXT: v_mov_b32_e32 v15, v45 +; CHECK-NEXT: v_mov_b32_e32 v16, v46 +; CHECK-NEXT: v_mov_b32_e32 v17, v47 +; CHECK-NEXT: v_mov_b32_e32 v18, v48 +; CHECK-NEXT: v_mov_b32_e32 v19, v49 +; CHECK-NEXT: v_mov_b32_e32 v20, v50 +; CHECK-NEXT: v_mov_b32_e32 v21, v51 +; CHECK-NEXT: v_mov_b32_e32 v22, v52 +; CHECK-NEXT: v_mov_b32_e32 v23, v53 +; CHECK-NEXT: v_mov_b32_e32 v24, v54 +; CHECK-NEXT: v_mov_b32_e32 v25, v55 +; CHECK-NEXT: v_mov_b32_e32 v26, v56 +; CHECK-NEXT: v_mov_b32_e32 v27, v57 +; CHECK-NEXT: v_mov_b32_e32 v28, v58 +; CHECK-NEXT: v_mov_b32_e32 v29, v59 +; CHECK-NEXT: v_mov_b32_e32 v30, v60 +; CHECK-NEXT: v_mov_b32_e32 v31, v61 +; CHECK-NEXT: v_mov_b32_e32 v32, 
0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], a0, a1, v[0:31] ; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: s_nop 1 -; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[0:1] offset:96 -; CHECK-NEXT: global_store_dwordx4 v0, a[28:31], s[0:1] offset:112 -; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[0:1] offset:64 -; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[0:1] offset:80 -; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32 -; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48 -; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1] -; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16 +; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 +; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] +; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 ; CHECK-NEXT: s_endpgm bb: %id = call i32 @llvm.amdgcn.workitem.id.x() @@ -146,35 +90,36 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle( ; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle: ; CHECK: ; %bb.0: ; %bb ; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0 -; CHECK-NEXT: v_mov_b32_e32 v1, 2.0 +; CHECK-NEXT: v_mov_b32_e32 v32, 1.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 2.0 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112 -; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96 -; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80 -; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64 -; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48 -; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32 -; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16 -; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1] -; CHECK-NEXT: v_mov_b32_e32 v0, 1.0 -; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[28:31], v0, s[0:1] offset:112 +; CHECK-NEXT: global_load_dwordx4 v[24:27], v0, s[0:1] offset:96 +; CHECK-NEXT: global_load_dwordx4 v[20:23], v0, s[0:1] offset:80 +; CHECK-NEXT: global_load_dwordx4 v[16:19], v0, s[0:1] offset:64 +; CHECK-NEXT: global_load_dwordx4 v[12:15], v0, s[0:1] offset:48 +; CHECK-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] offset:32 +; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[0:1] offset:16 ; CHECK-NEXT: s_nop 0 -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31] -; CHECK-NEXT: v_mov_b32_e32 v0, 0 +; CHECK-NEXT: global_load_dwordx4 v[0:3], v0, s[0:1] +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mov_b32_e32 v32, 0 ; CHECK-NEXT: s_nop 7 ; CHECK-NEXT: s_nop 7 -; CHECK-NEXT: s_nop 1 -; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[0:1] offset:96 -; CHECK-NEXT: global_store_dwordx4 v0, 
a[28:31], s[0:1] offset:112 -; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[0:1] offset:64 -; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[0:1] offset:80 -; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32 -; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48 -; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1] -; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 +; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] +; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 ; CHECK-NEXT: s_endpgm bb: %id = call i32 @llvm.amdgcn.workitem.id.x() @@ -187,9 +132,77 @@ bb: ret void } +define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm0_src2(ptr addrspace(1) %arg) #0 { +; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm0_src2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: v_mov_b32_e32 v32, 1.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 2.0 +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mov_b32_e32 v32, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 +; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 +; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> zeroinitializer, i32 0, i32 0, i32 0) + %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0) + %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0) + store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128 + ret void +} + +define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm1_src2(ptr addrspace(1) %arg) #0 { +; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm1_src2: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: v_mov_b32_e32 v32, 1.0 +; CHECK-NEXT: v_mov_b32_e32 v33, 2.0 +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, 1.0 +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31] +; CHECK-NEXT: v_mov_b32_e32 v32, 0 +; CHECK-NEXT: 
s_waitcnt lgkmcnt(0) +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: s_nop 7 +; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112 +; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96 +; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80 +; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64 +; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48 +; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32 +; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16 +; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1] +; CHECK-NEXT: s_endpgm +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id + %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128 + %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> splat (float 1.0), i32 0, i32 0, i32 0) + %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0) + %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0) + store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128 + ret void +} + declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #1 declare noundef i32 @llvm.amdgcn.workitem.id.x() #2 -attributes #0 = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,4" } +attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" "amdgpu-waves-per-eu"="4,4" } attributes #1 = { convergent nocallback nofree nosync nounwind willreturn memory(none) } attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll index 735720a..725d57d 100644 --- a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll +++ b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll @@ -285,7 +285,7 @@ define amdgpu_ps void @flat_store_b32_idxprom(ptr align 4 inreg %p, i32 %idx) { ; GCN-LABEL: flat_store_b32_idxprom: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: v_mov_b32_e32 v1, 1.0 -; GCN-NEXT: flat_store_b32 v0, v1, s[0:1] scale_offset +; GCN-NEXT: flat_store_b32 v0, v1, s[0:1] scale_offset scope:SCOPE_SE ; GCN-NEXT: s_endpgm entry: %idxprom = sext i32 %idx to i64 @@ -298,7 +298,7 @@ define amdgpu_ps void @flat_store_b16_idxprom(ptr align 2 inreg %p, i32 %idx) { ; GCN-LABEL: flat_store_b16_idxprom: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: v_mov_b32_e32 v1, 1 -; GCN-NEXT: flat_store_b16 v0, v1, s[0:1] scale_offset +; GCN-NEXT: flat_store_b16 v0, v1, s[0:1] scale_offset scope:SCOPE_SE ; GCN-NEXT: s_endpgm entry: %idxprom = sext i32 %idx to i64 @@ -311,7 +311,7 @@ define amdgpu_ps void @flat_store_b64_idxprom(ptr align 4 inreg %p, i32 %idx) { ; GCN-LABEL: flat_store_b64_idxprom: ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: v_mov_b64_e32 v[2:3], 1.0 -; GCN-NEXT: flat_store_b64 v0, v[2:3], s[0:1] scale_offset +; GCN-NEXT: flat_store_b64 v0, v[2:3], s[0:1] scale_offset 
scope:SCOPE_SE ; GCN-NEXT: s_endpgm entry: %idxprom = sext i32 %idx to i64 @@ -337,12 +337,15 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; SDAG-LABEL: flat_atomicrmw_b64_rtn_idxprom: ; SDAG: ; %bb.0: ; %entry ; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 3, s[0:1] -; SDAG-NEXT: s_mov_b64 s[0:1], src_private_base -; SDAG-NEXT: s_mov_b32 s0, exec_lo +; SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 -; SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3 +; SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo ; SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; SDAG-NEXT: s_cbranch_execnz .LBB21_3 ; SDAG-NEXT: ; %bb.1: ; %Flow @@ -360,13 +363,16 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; SDAG-NEXT: s_cbranch_execz .LBB21_2 ; SDAG-NEXT: .LBB21_4: ; %atomicrmw.private +; SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo ; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 +; SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; SDAG-NEXT: s_wait_loadcnt 0x0 ; SDAG-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] -; SDAG-NEXT: scratch_store_b64 v4, v[2:3], off +; SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; SDAG-NEXT: s_wait_xcnt 0x0 ; SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; SDAG-NEXT: s_branch .LBB21_5 @@ -374,19 +380,21 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; ; GISEL-LABEL: flat_atomicrmw_b64_rtn_idxprom: ; GISEL: ; %bb.0: ; %entry +; GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GISEL-NEXT: v_mov_b32_e32 v2, v0 ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1] -; GISEL-NEXT: s_mov_b64 s[2:3], src_private_base -; GISEL-NEXT: s_mov_b32 s2, exec_lo ; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GISEL-NEXT: v_lshlrev_b64_e32 v[0:1], 3, v[2:3] ; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v4, v0 ; GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v5, v1, vcc_lo +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GISEL-NEXT: v_xor_b32_e32 v0, s2, v5 +; GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GISEL-NEXT: v_cmpx_ne_u32_e64 s3, v5 +; GISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo +; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GISEL-NEXT: s_xor_b32 s2, exec_lo, s2 ; GISEL-NEXT: s_cbranch_execnz .LBB21_3 ; GISEL-NEXT: ; %bb.1: ; %Flow @@ -398,19 +406,22 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; GISEL-NEXT: s_branch .LBB21_5 ; GISEL-NEXT: .LBB21_3: ; %atomicrmw.global ; GISEL-NEXT: v_mov_b64_e32 v[0:1], 1 -; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 +; 
GISEL-NEXT: ; implicit-def: $vgpr4 ; GISEL-NEXT: flat_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] scale_offset th:TH_ATOMIC_RETURN scope:SCOPE_SYS ; GISEL-NEXT: s_wait_xcnt 0x0 ; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2 ; GISEL-NEXT: s_cbranch_execz .LBB21_2 ; GISEL-NEXT: .LBB21_4: ; %atomicrmw.private +; GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GISEL-NEXT: s_wait_loadcnt 0x0 ; GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] -; GISEL-NEXT: scratch_store_b64 v4, v[2:3], off +; GISEL-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GISEL-NEXT: s_wait_xcnt 0x0 ; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0 ; GISEL-NEXT: s_branch .LBB21_5 diff --git a/llvm/test/CodeGen/ARM/bad-constraint.ll b/llvm/test/CodeGen/ARM/bad-constraint.ll index 9b8fcd5..7d80f0c 100644 --- a/llvm/test/CodeGen/ARM/bad-constraint.ll +++ b/llvm/test/CodeGen/ARM/bad-constraint.ll @@ -1,6 +1,7 @@ ; RUN: not llc -filetype=obj %s -o /dev/null 2>&1 | FileCheck %s ; CHECK: error: couldn't allocate input reg for constraint '{d2}' ; CHECK-NEXT: error: couldn't allocate input reg for constraint '{s2}' +; CHECK-NEXT: error: couldn't allocate input reg for constraint '{d3}' target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" target triple = "armv8a-unknown-linux-gnueabihf" @@ -23,3 +24,8 @@ entry: ret void } +define void @_Z1dv() local_unnamed_addr { +entry: + tail call void asm sideeffect "", "{d3}"(<16 x i8> splat (i8 -1)) + ret void +} diff --git a/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll new file mode 100644 index 0000000..0c01bb9 --- /dev/null +++ b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll @@ -0,0 +1,14 @@ +; RUN: llc %s -filetype=asm -o - | FileCheck %s + +; CHECK: vmov.i8 d3, #0xff + +target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" +target triple = "armv8a-unknown-linux-gnueabihf" + +; Function Attrs: mustprogress noimplicitfloat nounwind +define void @cvt_vec() local_unnamed_addr { +entry: + tail call void asm sideeffect "", "{d3}"(<8 x i8> splat (i8 -1)) + ret void +} + diff --git a/llvm/test/CodeGen/AVR/cmp.ll b/llvm/test/CodeGen/AVR/cmp.ll index efc9b8d..c932bda1 100644 --- a/llvm/test/CodeGen/AVR/cmp.ll +++ b/llvm/test/CodeGen/AVR/cmp.ll @@ -298,3 +298,18 @@ define i16 @cmp_i16_gt_1023(i16 %0) { %3 = zext i1 %2 to i16 ret i16 %3 } + +define void @cmp_issue152097(i16 %a) addrspace(1) { +; See: https://github.com/llvm/llvm-project/issues/152097 +; CHECK-LABEL: cmp_issue152097 +; CHECK: ldi r18, -1 +; CHECK-NEXT: cpi r24, -2 +; CHECK-NEXT: cpc r25, r18 +; CHECK-NEXT: ret + %cmp = icmp ugt i16 -2, %a + br i1 %cmp, label %if.then, label %if.else +if.then: + ret void +if.else: + ret void +} diff --git a/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll b/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll new file mode 100644 index 0000000..7c0813b --- /dev/null +++ b/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll @@ -0,0 +1,33 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -dxil-forward-handle-accesses %s | FileCheck %s + 
+%"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", i32, 1, 0) } +@global = internal unnamed_addr global %"class.hlsl::RWStructuredBuffer" poison, align 4 +@name = private unnamed_addr constant [5 x i8] c"dest\00", align 1 + + +; NOTE: intent of this test is to confirm load target("dx.RawBuffer", i32, 1, 0) +; is replaced with call @llvm.dx.resource.getpointer +define void @CSMain() local_unnamed_addr { +; CHECK-LABEL: define void @CSMain() local_unnamed_addr { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[AGG_TMP_I1_SROA_0:%.*]] = alloca target("dx.RawBuffer", i32, 1, 0), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = tail call target("dx.RawBuffer", i32, 1, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_1_0t(i32 0, i32 3, i32 1, i32 0, i1 false, ptr nonnull @name) +; CHECK-NEXT: store target("dx.RawBuffer", i32, 1, 0) [[TMP0]], ptr @global, align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @global, align 4 +; CHECK-NEXT: store i32 [[TMP2]], ptr [[AGG_TMP_I1_SROA_0]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_i32_1_0t(target("dx.RawBuffer", i32, 1, 0) [[TMP0]], i32 0) +; CHECK-NEXT: store i32 0, ptr [[TMP3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %alloca = alloca target("dx.RawBuffer", i32, 1, 0), align 8 + %handle = tail call target("dx.RawBuffer", i32, 1, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_1_0t(i32 0, i32 3, i32 1, i32 0, i1 false, ptr nonnull @name) + store target("dx.RawBuffer", i32, 1, 0) %handle , ptr @global, align 4 + %val = load i32, ptr @global, align 4 + store i32 %val , ptr %alloca, align 8 + %indirect = load target("dx.RawBuffer", i32, 1, 0), ptr %alloca, align 8 + %buff = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_i32_1_0t(target("dx.RawBuffer", i32, 1, 0) %indirect, i32 0) + store i32 0, ptr %buff, align 4 + ret void +} diff --git a/llvm/test/CodeGen/Generic/allow-check.ll b/llvm/test/CodeGen/Generic/allow-check.ll index 148ee81..97719a7 100644 --- a/llvm/test/CodeGen/Generic/allow-check.ll +++ b/llvm/test/CodeGen/Generic/allow-check.ll @@ -6,6 +6,7 @@ ; XFAIL: target=nvptx{{.*}} ; XFAIL: target=sparc{{.*}} ; XFAIL: target=hexagon-{{.*}} +; XFAIL: target=arm64ec-{{.*}} ; RUN: llc < %s -O3 -global-isel=0 -fast-isel=0 ; RUN: llc < %s -O3 -global-isel=1 -fast-isel=0 diff --git a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll index 3800712..f0277a7 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll @@ -11,16 +11,16 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill ; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 1 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 1 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 ; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 ; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 0 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 0 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, 
%call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -29,8 +29,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1 ; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 2 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 2 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -39,8 +39,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 2 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 3 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -49,8 +49,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 3 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 4 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 4 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -59,8 +59,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 4 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 5 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 5 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -69,8 +69,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 5 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 6 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 6 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -79,8 +79,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6 ; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7 -; CHECK-NEXT: movgr2fr.w $fa0, $a0 +; CHECK-NEXT: xvpickve.w $xr0, $xr0, 7 +; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -107,16 +107,16 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind { ; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill ; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill ; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 -; CHECK-NEXT: movgr2fr.d $fa0, $a0 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 1 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed 
$xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 ; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 ; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: movgr2fr.d $fa0, $a0 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -125,8 +125,8 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 ; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2 -; CHECK-NEXT: movgr2fr.d $fa0, $a0 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 2 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 @@ -135,8 +135,8 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind { ; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2 ; CHECK-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill ; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: movgr2fr.d $fa0, $a0 +; CHECK-NEXT: xvpickve.d $xr0, $xr0, 3 +; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 ; CHECK-NEXT: move $a0, $fp ; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) ; CHECK-NEXT: jirl $ra, $ra, 0 diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll index 221aba3..8ee567c 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll @@ -6,12 +6,12 @@ define <4 x double> @shufflevector_v4f64(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: shufflevector_v4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.d $a0, $xr1, 2 -; CHECK-NEXT: xvpickve2gr.d $a1, $xr0, 3 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 2 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr1, 3 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 3 +; CHECK-NEXT: xvpickve.d $xr2, $xr1, 2 +; CHECK-NEXT: xvpickve.d $xr3, $xr0, 3 +; CHECK-NEXT: xvinsve0.d $xr0, $xr2, 1 +; CHECK-NEXT: xvinsve0.d $xr0, $xr3, 2 +; CHECK-NEXT: xvpickve.d $xr1, $xr1, 3 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 3 ; CHECK-NEXT: ret entry: %c = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 6, i32 3, i32 7> diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll index 271e3ec..ac5a214 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll @@ -42,8 +42,8 @@ entry: define <8 x float> @insert_extract_v8f32(<8 x float> %a) nounwind { ; CHECK-LABEL: insert_extract_v8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7 -; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 1 +; CHECK-NEXT: xvpickve.w $xr1, $xr0, 7 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1 ; CHECK-NEXT: ret entry: %b = extractelement <8 x float> %a, i32 7 @@ -66,8 +66,8 @@ entry: define <4 x double> @insert_extract_v4f64(<4 x double> %a) nounwind { ; CHECK-LABEL: 
insert_extract_v4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1 +; CHECK-NEXT: xvpickve.d $xr1, $xr0, 3 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 ; CHECK-NEXT: ret entry: %b = extractelement <4 x double> %a, i32 3 diff --git a/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll new file mode 100644 index 0000000..822a75e --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll @@ -0,0 +1,78 @@ +; RUN: opt < %s -S -passes=infer-address-spaces | FileCheck %s --check-prefix=INFER
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | FileCheck %s --check-prefix=PTX
+; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | %ptxas-verify %}
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+@constant_tensormap = addrspace(4) global [64 x i8] zeroinitializer, align 64
+
+; Inference from const address space
+define void @test_infer_const_from_cast() {
+; INFER-LABEL: @test_infer_const_from_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .func test_infer_const_from_cast(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %casted = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %casted)
+ ret void
+}
+
+; Cast from Const space to Generic
+define void @test_const_to_generic_cast(ptr addrspace(4) %const_ptr) {
+; INFER-LABEL: @test_const_to_generic_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+; PTX-LABEL: .visible .func test_const_to_generic_cast(
+; PTX: prefetch.const.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(4) %const_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; No inference possible
+define void @test_no_inference_possible(ptr %generic_ptr) {
+; INFER-LABEL: @test_no_inference_possible
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p0(ptr %generic_ptr)
+; PTX-LABEL: .visible .func test_no_inference_possible(
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %generic_ptr)
+ ret void
+}
+
+; Cast from Parameter space to Generic
+define void @test_param_to_generic_cast(ptr addrspace(101) %param_ptr) {
+; INFER-LABEL: @test_param_to_generic_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+; PTX-LABEL: .visible .func test_param_to_generic_cast(
+; PTX: prefetch.param.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(101) %param_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; Multiple casts in sequence
+define void @test_infer_through_multiple_casts() {
+; INFER-LABEL: @test_infer_through_multiple_casts
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .func test_infer_through_multiple_casts(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast1 = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ %cast2 = addrspacecast ptr %cast1 to ptr addrspace(4)
+ %cast3 = addrspacecast ptr addrspace(4) %cast2 to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast3)
+ ret void
+}
+
+declare void @llvm.nvvm.prefetch.tensormap.p0(ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4))
+declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101))
diff --git a/llvm/test/CodeGen/NVPTX/prefetch.ll b/llvm/test/CodeGen/NVPTX/prefetch.ll index a64e4fe..862e26d 100644 --- a/llvm/test/CodeGen/NVPTX/prefetch.ll +++ b/llvm/test/CodeGen/NVPTX/prefetch.ll @@ -12,6 +12,10 @@ declare void @llvm.nvvm.prefetch.local.L2(ptr addrspace(5) %local_ptr) declare void @llvm.nvvm.prefetch.L1(ptr %ptr)
declare void @llvm.nvvm.prefetch.L2(ptr %ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+
declare void @llvm.nvvm.prefetch.global.L2.evict.normal(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetch.global.L2.evict.last(ptr addrspace(1) %global_ptr)
@@ -78,4 +82,43 @@ define void @prefetchu_l1(ptr %ptr) { ; CHECK-PTX64-NEXT: ret;
tail call void @llvm.nvvm.prefetchu.L1(ptr %ptr)
ret void
+}
+
+define void @prefetch_tensormap(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetch_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+ ret void
+}
+
+define void @prefetch_const_tensormap(ptr addrspace(4) %const_ptr) {
+; CHECK-PTX64-LABEL: prefetch_const_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_const_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.const.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+ ret void
+}
+
+define void @prefetch_param_tensormap(ptr addrspace(101) %param_ptr) {
+; CHECK-PTX64-LABEL: prefetch_param_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_param_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.param.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+ ret void
}
\ No newline at end of file diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll index 92cb51b..94c2637 100644 --- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll +++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll @@ -2,19 +2,18 @@ ; RUN: llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM80 %s -; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ +; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | %ptxas-verify -arch=sm_80 %} -; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \ +; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM100 %s -; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \ +; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \ ; RUN: -disable-post-ra -verify-machineinstrs \ ; RUN: | %ptxas-verify -arch=sm_100 %} target triple = "nvptx64-nvidia-cuda" target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" -; Check straight line reduction. define half @reduce_fadd_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fadd_half( ; CHECK: { @@ -43,45 +42,22 @@ define half @reduce_fadd_half(<8 x half> %in) { } define half @reduce_fadd_half_reassoc(<8 x half> %in) { -; CHECK-SM80-LABEL: reduce_fadd_half_reassoc( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<6>; -; CHECK-SM80-NEXT: .reg .b32 %r<10>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; -; CHECK-SM80-NEXT: add.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM80-NEXT: add.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM80-NEXT: add.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: add.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: mov.b16 %rs4, 0x0000; -; CHECK-SM80-NEXT: add.rn.f16 %rs5, %rs3, %rs4; -; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs5; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_fadd_half_reassoc( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<6>; -; CHECK-SM100-NEXT: .reg .b32 %r<10>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; -; CHECK-SM100-NEXT: add.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM100-NEXT: add.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM100-NEXT: add.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: add.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: mov.b16 %rs4, 0x0000; -; CHECK-SM100-NEXT: add.rn.f16 %rs5, %rs3, %rs4; -; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs5; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_fadd_half_reassoc( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<6>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0]; +; CHECK-NEXT: add.rn.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: add.rn.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: add.rn.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 
{%rs1, %rs2}, %r7; +; CHECK-NEXT: add.rn.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: mov.b16 %rs4, 0x0000; +; CHECK-NEXT: add.rn.f16 %rs5, %rs3, %rs4; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs5; +; CHECK-NEXT: ret; %res = call reassoc half @llvm.vector.reduce.fadd(half 0.0, <8 x half> %in) ret half %res } @@ -109,7 +85,6 @@ define half @reduce_fadd_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fadd_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fadd_float( ; CHECK: { @@ -148,15 +123,15 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0]; ; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4; ; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-SM80-NEXT: add.rn.f32 %r5, %r3, %r1; +; CHECK-SM80-NEXT: add.rn.f32 %r5, %r4, %r2; ; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3; ; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-SM80-NEXT: add.rn.f32 %r10, %r8, %r6; -; CHECK-SM80-NEXT: add.rn.f32 %r11, %r4, %r2; -; CHECK-SM80-NEXT: add.rn.f32 %r12, %r9, %r7; -; CHECK-SM80-NEXT: add.rn.f32 %r13, %r12, %r11; -; CHECK-SM80-NEXT: add.rn.f32 %r14, %r10, %r5; -; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r13; +; CHECK-SM80-NEXT: add.rn.f32 %r10, %r9, %r7; +; CHECK-SM80-NEXT: add.rn.f32 %r11, %r10, %r5; +; CHECK-SM80-NEXT: add.rn.f32 %r12, %r3, %r1; +; CHECK-SM80-NEXT: add.rn.f32 %r13, %r8, %r6; +; CHECK-SM80-NEXT: add.rn.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r11; ; CHECK-SM80-NEXT: add.rn.f32 %r16, %r15, 0f00000000; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r16; ; CHECK-SM80-NEXT: ret; @@ -164,7 +139,7 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM100-LABEL: reduce_fadd_float_reassoc( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b32 %r<5>; -; CHECK-SM100-NEXT: .reg .b64 %rd<10>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16]; @@ -172,11 +147,8 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) { ; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4; ; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3; ; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5; -; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7; -; CHECK-SM100-NEXT: // implicit-def: %r2 -; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2}; -; CHECK-SM100-NEXT: add.rn.f32x2 %rd9, %rd7, %rd8; -; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7; +; CHECK-SM100-NEXT: add.rn.f32 %r3, %r1, %r2; ; CHECK-SM100-NEXT: add.rn.f32 %r4, %r3, 0f00000000; ; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-SM100-NEXT: ret; @@ -229,7 +201,6 @@ define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) { ret float %res } -; Check straight line reduction. 
define half @reduce_fmul_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmul_half( ; CHECK: { @@ -256,41 +227,20 @@ define half @reduce_fmul_half(<8 x half> %in) { } define half @reduce_fmul_half_reassoc(<8 x half> %in) { -; CHECK-SM80-LABEL: reduce_fmul_half_reassoc( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<10>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: mul.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs3; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_fmul_half_reassoc( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<10>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r5, %r2, %r4; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r6, %r1, %r3; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: mul.rn.f16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs3; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_fmul_half_reassoc( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0]; +; CHECK-NEXT: mul.rn.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: mul.rn.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: mul.rn.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: mul.rn.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; +; CHECK-NEXT: ret; %res = call reassoc half @llvm.vector.reduce.fmul(half 1.0, <8 x half> %in) ret half %res } @@ -321,7 +271,6 @@ define half @reduce_fmul_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. 
define float @reduce_fmul_float(<8 x float> %in) { ; CHECK-LABEL: reduce_fmul_float( ; CHECK: { @@ -359,22 +308,22 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) { ; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0]; ; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4; ; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r3, %r1; +; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r4, %r2; ; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3; ; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r8, %r6; -; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r4, %r2; -; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r9, %r7; -; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r12, %r11; -; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r10, %r5; -; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r13; +; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r9, %r7; +; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r10, %r5; +; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r3, %r1; +; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r8, %r6; +; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r11; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-SM80-NEXT: ret; ; ; CHECK-SM100-LABEL: reduce_fmul_float_reassoc( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b32 %r<4>; -; CHECK-SM100-NEXT: .reg .b64 %rd<10>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_reassoc_param_0+16]; @@ -382,11 +331,8 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) { ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd2, %rd4; ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd1, %rd3; ; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5; -; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7; -; CHECK-SM100-NEXT: // implicit-def: %r2 -; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2}; -; CHECK-SM100-NEXT: mul.rn.f32x2 %rd9, %rd7, %rd8; -; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7; +; CHECK-SM100-NEXT: mul.rn.f32 %r3, %r1, %r2; ; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in) @@ -436,7 +382,6 @@ define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) { ret float %res } -; Check straight line reduction. define half @reduce_fmax_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmax_half( ; CHECK: { @@ -501,84 +446,256 @@ define half @reduce_fmax_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. 
-define float @reduce_fmax_float(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmax_float( +define half @reduce_fmax_half_nnan(<8 x half> %in) { +; CHECK-LABEL: reduce_fmax_half_nnan( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_half_nnan_param_0]; +; CHECK-NEXT: max.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: max.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: max.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: max.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-NEXT: ret; - %res = call float @llvm.vector.reduce.fmax(<8 x float> %in) - ret float %res + %res = call nnan half @llvm.vector.reduce.fmax(<8 x half> %in) + ret half %res } -define float @reduce_fmax_float_reassoc(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmax_float_reassoc( +define half @reduce_fmax_half_nnan_nonpow2(<7 x half> %in) { +; CHECK-LABEL: reduce_fmax_half_nnan_nonpow2( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<12>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmax_half_nnan_nonpow2_param_0+8]; +; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1; +; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmax_half_nnan_nonpow2_param_0]; +; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2; +; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmax_half_nnan_nonpow2_param_0+12]; +; CHECK-NEXT: max.f16x2 %r4, %r2, %r1; +; CHECK-NEXT: mov.b16 %rs8, 0xFC00; +; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8}; +; CHECK-NEXT: max.f16x2 %r6, %r3, %r5; +; CHECK-NEXT: max.f16x2 %r7, %r4, %r6; +; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7; +; CHECK-NEXT: max.f16 %rs11, %rs9, %rs10; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs11; ; CHECK-NEXT: ret; + %res = call nnan half @llvm.vector.reduce.fmax(<7 x half> %in) + ret half %res +} + +define float @reduce_fmax_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: 
.reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call float @llvm.vector.reduce.fmax(<8 x float> %in) + ret float %res +} + +define float @reduce_fmax_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; 
CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmax(<8 x float> %in) ret float %res } define float @reduce_fmax_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmax_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: max.f32 %r8, %r3, %r7; -; CHECK-NEXT: max.f32 %r9, %r1, %r5; -; CHECK-NEXT: max.f32 %r10, %r9, %r8; -; CHECK-NEXT: max.f32 %r11, %r2, %r6; -; CHECK-NEXT: max.f32 %r12, %r11, %r4; -; CHECK-NEXT: max.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmax_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmax(<7 x float> %in) ret float %res } -; Check straight line reduction. 
+define float @reduce_fmax_float_nnan(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_nnan( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_nnan( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmax(<8 x float> %in) + ret float %res +} + +define float @reduce_fmax_float_nnan_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmax_float_nnan_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmax_float_nnan_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmax(<7 x float> %in) + ret float %res +} + define half 
@reduce_fmin_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmin_half( ; CHECK: { @@ -643,84 +760,256 @@ define half @reduce_fmin_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. -define float @reduce_fmin_float(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmin_float( +define half @reduce_fmin_half_nnan(<8 x half> %in) { +; CHECK-LABEL: reduce_fmin_half_nnan( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_half_nnan_param_0]; +; CHECK-NEXT: min.f16x2 %r5, %r2, %r4; +; CHECK-NEXT: min.f16x2 %r6, %r1, %r3; +; CHECK-NEXT: min.f16x2 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: min.f16 %rs3, %rs1, %rs2; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-NEXT: ret; - %res = call float @llvm.vector.reduce.fmin(<8 x float> %in) - ret float %res + %res = call nnan half @llvm.vector.reduce.fmin(<8 x half> %in) + ret half %res } -define float @reduce_fmin_float_reassoc(<8 x float> %in) { -; -; CHECK-LABEL: reduce_fmin_float_reassoc( +define half @reduce_fmin_half_nnan_nonpow2(<7 x half> %in) { +; CHECK-LABEL: reduce_fmin_half_nnan_nonpow2( ; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; +; CHECK-NEXT: .reg .b16 %rs<12>; +; CHECK-NEXT: .reg .b32 %r<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmin_half_nnan_nonpow2_param_0+8]; +; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1; +; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmin_half_nnan_nonpow2_param_0]; +; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2; +; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmin_half_nnan_nonpow2_param_0+12]; +; CHECK-NEXT: min.f16x2 %r4, %r2, %r1; +; CHECK-NEXT: mov.b16 %rs8, 0x7C00; +; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8}; +; CHECK-NEXT: min.f16x2 %r6, %r3, %r5; +; CHECK-NEXT: min.f16x2 %r7, %r4, %r6; +; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7; +; CHECK-NEXT: min.f16 %rs11, %rs9, %rs10; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs11; ; CHECK-NEXT: ret; + %res = call nnan half 
@llvm.vector.reduce.fmin(<7 x half> %in) + ret half %res +} + +define float @reduce_fmin_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call float @llvm.vector.reduce.fmin(<8 x float> %in) + ret float %res +} + +define float @reduce_fmin_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, 
%r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmin(<8 x float> %in) ret float %res } define float @reduce_fmin_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmin_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: min.f32 %r8, %r3, %r7; -; CHECK-NEXT: min.f32 %r9, %r1, %r5; -; CHECK-NEXT: min.f32 %r10, %r9, %r8; -; CHECK-NEXT: min.f32 %r11, %r2, %r6; -; CHECK-NEXT: min.f32 %r12, %r11, %r4; -; CHECK-NEXT: min.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmin_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmin(<7 x float> %in) ret float %res } -; Check straight-line reduction. 
+define float @reduce_fmin_float_nnan(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_nnan( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_nnan( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmin(<8 x float> %in) + ret float %res +} + +define float @reduce_fmin_float_nnan_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmin_float_nnan_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_fmin_float_nnan_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; + %res = call nnan float @llvm.vector.reduce.fmin(<7 x float> %in) + ret float %res +} + define half 
@reduce_fmaximum_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fmaximum_half( ; CHECK: { @@ -785,84 +1074,131 @@ define half @reduce_fmaximum_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fmaximum_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call float @llvm.vector.reduce.fmaximum(<8 x float> %in) ret float %res } define float @reduce_fmaximum_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, 
[reduce_fmaximum_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float_reassoc( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmaximum(<8 x float> %in) ret float %res } define float @reduce_fmaximum_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: max.NaN.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fmaximum_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: 
ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: max.NaN.f32 %r8, %r3, %r7; -; CHECK-NEXT: max.NaN.f32 %r9, %r1, %r5; -; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r8; -; CHECK-NEXT: max.NaN.f32 %r11, %r2, %r6; -; CHECK-NEXT: max.NaN.f32 %r12, %r11, %r4; -; CHECK-NEXT: max.NaN.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: max.NaN.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fmaximum(<7 x float> %in) ret float %res } -; Check straight-line reduction. define half @reduce_fminimum_half(<8 x half> %in) { ; CHECK-LABEL: reduce_fminimum_half( ; CHECK: { @@ -927,79 +1263,127 @@ define half @reduce_fminimum_half_reassoc_nonpow2(<7 x half> %in) { ret half %res } -; Check straight-line reduction. define float @reduce_fminimum_float(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg 
.b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, %r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call float @llvm.vector.reduce.fminimum(<8 x float> %in) ret float %res } define float @reduce_fminimum_float_reassoc(<8 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-NEXT: .reg .b64 %rd<5>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; +; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; +; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6; +; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float_reassoc( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-NEXT: .reg .b64 %rd<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; -; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; -; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4; -; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2; -; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2; -; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3; -; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7; -; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5; -; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1; -; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6; -; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12; -; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<13>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7; +; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1; +; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, 
%r2; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fminimum(<8 x float> %in) ret float %res } define float @reduce_fminimum_float_reassoc_nonpow2(<7 x float> %in) { +; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc_nonpow2( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<14>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; +; CHECK-SM80-NEXT: min.NaN.f32 %r8, %r5, %r6; +; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r8, %r7; +; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r3, %r4; +; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r1, %r2; +; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r11, %r10; +; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r12, %r9; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13; +; CHECK-SM80-NEXT: ret; ; -; CHECK-LABEL: reduce_fminimum_float_reassoc_nonpow2( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<14>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; -; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; -; CHECK-NEXT: min.NaN.f32 %r8, %r3, %r7; -; CHECK-NEXT: min.NaN.f32 %r9, %r1, %r5; -; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r8; -; CHECK-NEXT: min.NaN.f32 %r11, %r2, %r6; -; CHECK-NEXT: min.NaN.f32 %r12, %r11, %r4; -; CHECK-NEXT: min.NaN.f32 %r13, %r10, %r12; -; CHECK-NEXT: st.param.b32 [func_retval0], %r13; -; CHECK-NEXT: ret; +; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc_nonpow2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24]; +; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0]; +; CHECK-SM100-NEXT: min.NaN.f32 %r8, %r4, %r5, %r6; +; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r1, %r2, %r3; +; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r9, %r8, %r7; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: ret; %res = call reassoc float @llvm.vector.reduce.fminimum(<7 x float> %in) ret float %res } @@ -1014,15 +1398,15 @@ define i16 @reduce_add_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: add.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: add.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: add.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: add.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: add.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: add.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: add.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: add.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: add.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: add.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: add.s16 %rs13, %rs8, %rs6; +; 
CHECK-SM80-NEXT: add.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1030,20 +1414,17 @@ define i16 @reduce_add_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_add_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0]; ; CHECK-SM100-NEXT: add.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: add.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: add.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: add.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: add.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.add(<8 x i16> %in) ret i16 %res @@ -1103,13 +1484,13 @@ define i32 @reduce_add_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0]; -; CHECK-NEXT: add.s32 %r9, %r3, %r7; -; CHECK-NEXT: add.s32 %r10, %r1, %r5; -; CHECK-NEXT: add.s32 %r11, %r4, %r8; -; CHECK-NEXT: add.s32 %r12, %r2, %r6; -; CHECK-NEXT: add.s32 %r13, %r12, %r11; -; CHECK-NEXT: add.s32 %r14, %r10, %r9; -; CHECK-NEXT: add.s32 %r15, %r14, %r13; +; CHECK-NEXT: add.s32 %r9, %r4, %r8; +; CHECK-NEXT: add.s32 %r10, %r2, %r6; +; CHECK-NEXT: add.s32 %r11, %r10, %r9; +; CHECK-NEXT: add.s32 %r12, %r3, %r7; +; CHECK-NEXT: add.s32 %r13, %r1, %r5; +; CHECK-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-NEXT: add.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.add(<8 x i32> %in) @@ -1147,15 +1528,15 @@ define i16 @reduce_mul_i16(<8 x i16> %in) { ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i16_param_0]; ; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-NEXT: mul.lo.s16 %rs5, %rs3, %rs1; +; CHECK-NEXT: mul.lo.s16 %rs5, %rs4, %rs2; ; CHECK-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-NEXT: mul.lo.s16 %rs10, %rs8, %rs6; -; CHECK-NEXT: mul.lo.s16 %rs11, %rs4, %rs2; -; CHECK-NEXT: mul.lo.s16 %rs12, %rs9, %rs7; -; CHECK-NEXT: mul.lo.s16 %rs13, %rs12, %rs11; -; CHECK-NEXT: mul.lo.s16 %rs14, %rs10, %rs5; -; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs13; +; CHECK-NEXT: mul.lo.s16 %rs10, %rs9, %rs7; +; CHECK-NEXT: mul.lo.s16 %rs11, %rs10, %rs5; +; CHECK-NEXT: mul.lo.s16 %rs12, %rs3, %rs1; +; CHECK-NEXT: mul.lo.s16 %rs13, %rs8, %rs6; +; CHECK-NEXT: mul.lo.s16 %rs14, %rs13, %rs12; +; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs11; ; CHECK-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; @@ -1194,13 +1575,13 @@ define i32 @reduce_mul_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, 
[reduce_mul_i32_param_0]; -; CHECK-NEXT: mul.lo.s32 %r9, %r3, %r7; -; CHECK-NEXT: mul.lo.s32 %r10, %r1, %r5; -; CHECK-NEXT: mul.lo.s32 %r11, %r4, %r8; -; CHECK-NEXT: mul.lo.s32 %r12, %r2, %r6; -; CHECK-NEXT: mul.lo.s32 %r13, %r12, %r11; -; CHECK-NEXT: mul.lo.s32 %r14, %r10, %r9; -; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r13; +; CHECK-NEXT: mul.lo.s32 %r9, %r4, %r8; +; CHECK-NEXT: mul.lo.s32 %r10, %r2, %r6; +; CHECK-NEXT: mul.lo.s32 %r11, %r10, %r9; +; CHECK-NEXT: mul.lo.s32 %r12, %r3, %r7; +; CHECK-NEXT: mul.lo.s32 %r13, %r1, %r5; +; CHECK-NEXT: mul.lo.s32 %r14, %r13, %r12; +; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.mul(<8 x i32> %in) @@ -1238,15 +1619,15 @@ define i16 @reduce_umax_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: max.u16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: max.u16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: max.u16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: max.u16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: max.u16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: max.u16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: max.u16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: max.u16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: max.u16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: max.u16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: max.u16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: max.u16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1254,20 +1635,17 @@ define i16 @reduce_umax_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_umax_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0]; ; CHECK-SM100-NEXT: max.u16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: max.u16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: max.u16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: max.u16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: max.u16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.umax(<8 x i16> %in) ret i16 %res @@ -1327,13 +1705,13 @@ define i32 @reduce_umax_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0]; -; CHECK-NEXT: max.u32 %r9, %r3, %r7; -; CHECK-NEXT: max.u32 %r10, %r1, %r5; -; CHECK-NEXT: max.u32 %r11, %r4, %r8; -; CHECK-NEXT: max.u32 %r12, %r2, %r6; -; CHECK-NEXT: max.u32 %r13, %r12, %r11; -; CHECK-NEXT: max.u32 %r14, %r10, %r9; -; CHECK-NEXT: max.u32 %r15, %r14, %r13; +; CHECK-NEXT: max.u32 %r9, 
%r4, %r8; +; CHECK-NEXT: max.u32 %r10, %r2, %r6; +; CHECK-NEXT: max.u32 %r11, %r10, %r9; +; CHECK-NEXT: max.u32 %r12, %r3, %r7; +; CHECK-NEXT: max.u32 %r13, %r1, %r5; +; CHECK-NEXT: max.u32 %r14, %r13, %r12; +; CHECK-NEXT: max.u32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.umax(<8 x i32> %in) @@ -1371,15 +1749,15 @@ define i16 @reduce_umin_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: min.u16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: min.u16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: min.u16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: min.u16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: min.u16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: min.u16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: min.u16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: min.u16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: min.u16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: min.u16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: min.u16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: min.u16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1387,20 +1765,17 @@ define i16 @reduce_umin_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_umin_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0]; ; CHECK-SM100-NEXT: min.u16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: min.u16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: min.u16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: min.u16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: min.u16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.umin(<8 x i16> %in) ret i16 %res @@ -1460,13 +1835,13 @@ define i32 @reduce_umin_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0]; -; CHECK-NEXT: min.u32 %r9, %r3, %r7; -; CHECK-NEXT: min.u32 %r10, %r1, %r5; -; CHECK-NEXT: min.u32 %r11, %r4, %r8; -; CHECK-NEXT: min.u32 %r12, %r2, %r6; -; CHECK-NEXT: min.u32 %r13, %r12, %r11; -; CHECK-NEXT: min.u32 %r14, %r10, %r9; -; CHECK-NEXT: min.u32 %r15, %r14, %r13; +; CHECK-NEXT: min.u32 %r9, %r4, %r8; +; CHECK-NEXT: min.u32 %r10, %r2, %r6; +; CHECK-NEXT: min.u32 %r11, %r10, %r9; +; CHECK-NEXT: min.u32 %r12, %r3, %r7; +; CHECK-NEXT: min.u32 %r13, %r1, %r5; +; CHECK-NEXT: min.u32 %r14, %r13, %r12; +; CHECK-NEXT: min.u32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.umin(<8 x i32> %in) 
@@ -1504,15 +1879,15 @@ define i16 @reduce_smax_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: max.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: max.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: max.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: max.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: max.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: max.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: max.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: max.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: max.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: max.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: max.s16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: max.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1520,20 +1895,17 @@ define i16 @reduce_smax_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_smax_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0]; ; CHECK-SM100-NEXT: max.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: max.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: max.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: max.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: max.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.smax(<8 x i16> %in) ret i16 %res @@ -1593,13 +1965,13 @@ define i32 @reduce_smax_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0]; -; CHECK-NEXT: max.s32 %r9, %r3, %r7; -; CHECK-NEXT: max.s32 %r10, %r1, %r5; -; CHECK-NEXT: max.s32 %r11, %r4, %r8; -; CHECK-NEXT: max.s32 %r12, %r2, %r6; -; CHECK-NEXT: max.s32 %r13, %r12, %r11; -; CHECK-NEXT: max.s32 %r14, %r10, %r9; -; CHECK-NEXT: max.s32 %r15, %r14, %r13; +; CHECK-NEXT: max.s32 %r9, %r4, %r8; +; CHECK-NEXT: max.s32 %r10, %r2, %r6; +; CHECK-NEXT: max.s32 %r11, %r10, %r9; +; CHECK-NEXT: max.s32 %r12, %r3, %r7; +; CHECK-NEXT: max.s32 %r13, %r1, %r5; +; CHECK-NEXT: max.s32 %r14, %r13, %r12; +; CHECK-NEXT: max.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.smax(<8 x i32> %in) @@ -1637,15 +2009,15 @@ define i16 @reduce_smin_i16(<8 x i16> %in) { ; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0]; ; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4; ; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2; -; CHECK-SM80-NEXT: min.s16 %rs5, %rs3, %rs1; +; CHECK-SM80-NEXT: min.s16 %rs5, %rs4, %rs2; ; CHECK-SM80-NEXT: mov.b32 {%rs6, 
%rs7}, %r3; ; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1; -; CHECK-SM80-NEXT: min.s16 %rs10, %rs8, %rs6; -; CHECK-SM80-NEXT: min.s16 %rs11, %rs4, %rs2; -; CHECK-SM80-NEXT: min.s16 %rs12, %rs9, %rs7; -; CHECK-SM80-NEXT: min.s16 %rs13, %rs12, %rs11; -; CHECK-SM80-NEXT: min.s16 %rs14, %rs10, %rs5; -; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs13; +; CHECK-SM80-NEXT: min.s16 %rs10, %rs9, %rs7; +; CHECK-SM80-NEXT: min.s16 %rs11, %rs10, %rs5; +; CHECK-SM80-NEXT: min.s16 %rs12, %rs3, %rs1; +; CHECK-SM80-NEXT: min.s16 %rs13, %rs8, %rs6; +; CHECK-SM80-NEXT: min.s16 %rs14, %rs13, %rs12; +; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs11; ; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15; ; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-SM80-NEXT: ret; @@ -1653,20 +2025,17 @@ define i16 @reduce_smin_i16(<8 x i16> %in) { ; CHECK-SM100-LABEL: reduce_smin_i16( ; CHECK-SM100: { ; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; +; CHECK-SM100-NEXT: .reg .b32 %r<9>; ; CHECK-SM100-EMPTY: ; CHECK-SM100-NEXT: // %bb.0: ; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0]; ; CHECK-SM100-NEXT: min.s16x2 %r5, %r2, %r4; ; CHECK-SM100-NEXT: min.s16x2 %r6, %r1, %r3; ; CHECK-SM100-NEXT: min.s16x2 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: min.s16x2 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; +; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-SM100-NEXT: min.s16 %rs3, %rs1, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-SM100-NEXT: ret; %res = call i16 @llvm.vector.reduce.smin(<8 x i16> %in) ret i16 %res @@ -1726,13 +2095,13 @@ define i32 @reduce_smin_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0]; -; CHECK-NEXT: min.s32 %r9, %r3, %r7; -; CHECK-NEXT: min.s32 %r10, %r1, %r5; -; CHECK-NEXT: min.s32 %r11, %r4, %r8; -; CHECK-NEXT: min.s32 %r12, %r2, %r6; -; CHECK-NEXT: min.s32 %r13, %r12, %r11; -; CHECK-NEXT: min.s32 %r14, %r10, %r9; -; CHECK-NEXT: min.s32 %r15, %r14, %r13; +; CHECK-NEXT: min.s32 %r9, %r4, %r8; +; CHECK-NEXT: min.s32 %r10, %r2, %r6; +; CHECK-NEXT: min.s32 %r11, %r10, %r9; +; CHECK-NEXT: min.s32 %r12, %r3, %r7; +; CHECK-NEXT: min.s32 %r13, %r1, %r5; +; CHECK-NEXT: min.s32 %r14, %r13, %r12; +; CHECK-NEXT: min.s32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.smin(<8 x i32> %in) @@ -1761,43 +2130,21 @@ define i32 @reduce_smin_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_and_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_and_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; -; CHECK-SM80-NEXT: and.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: and.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: and.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: and.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg 
.b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_and_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; -; CHECK-SM100-NEXT: and.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: and.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: and.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: and.b32 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_and_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0]; +; CHECK-NEXT: and.b32 %r5, %r2, %r4; +; CHECK-NEXT: and.b32 %r6, %r1, %r3; +; CHECK-NEXT: and.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: and.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 @llvm.vector.reduce.and(<8 x i16> %in) ret i16 %res } @@ -1837,13 +2184,13 @@ define i32 @reduce_and_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0]; -; CHECK-NEXT: and.b32 %r9, %r3, %r7; -; CHECK-NEXT: and.b32 %r10, %r1, %r5; -; CHECK-NEXT: and.b32 %r11, %r4, %r8; -; CHECK-NEXT: and.b32 %r12, %r2, %r6; -; CHECK-NEXT: and.b32 %r13, %r12, %r11; -; CHECK-NEXT: and.b32 %r14, %r10, %r9; -; CHECK-NEXT: and.b32 %r15, %r14, %r13; +; CHECK-NEXT: and.b32 %r9, %r4, %r8; +; CHECK-NEXT: and.b32 %r10, %r2, %r6; +; CHECK-NEXT: and.b32 %r11, %r10, %r9; +; CHECK-NEXT: and.b32 %r12, %r3, %r7; +; CHECK-NEXT: and.b32 %r13, %r1, %r5; +; CHECK-NEXT: and.b32 %r14, %r13, %r12; +; CHECK-NEXT: and.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.and(<8 x i32> %in) @@ -1872,43 +2219,21 @@ define i32 @reduce_and_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_or_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_or_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; -; CHECK-SM80-NEXT: or.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: or.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: or.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: or.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_or_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; 
CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; -; CHECK-SM100-NEXT: or.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: or.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: or.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: or.b32 %r9, %r7, %r8; -; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_or_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0]; +; CHECK-NEXT: or.b32 %r5, %r2, %r4; +; CHECK-NEXT: or.b32 %r6, %r1, %r3; +; CHECK-NEXT: or.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: or.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 @llvm.vector.reduce.or(<8 x i16> %in) ret i16 %res } @@ -1948,13 +2273,13 @@ define i32 @reduce_or_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0]; -; CHECK-NEXT: or.b32 %r9, %r3, %r7; -; CHECK-NEXT: or.b32 %r10, %r1, %r5; -; CHECK-NEXT: or.b32 %r11, %r4, %r8; -; CHECK-NEXT: or.b32 %r12, %r2, %r6; -; CHECK-NEXT: or.b32 %r13, %r12, %r11; -; CHECK-NEXT: or.b32 %r14, %r10, %r9; -; CHECK-NEXT: or.b32 %r15, %r14, %r13; +; CHECK-NEXT: or.b32 %r9, %r4, %r8; +; CHECK-NEXT: or.b32 %r10, %r2, %r6; +; CHECK-NEXT: or.b32 %r11, %r10, %r9; +; CHECK-NEXT: or.b32 %r12, %r3, %r7; +; CHECK-NEXT: or.b32 %r13, %r1, %r5; +; CHECK-NEXT: or.b32 %r14, %r13, %r12; +; CHECK-NEXT: or.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.or(<8 x i32> %in) @@ -1983,43 +2308,21 @@ define i32 @reduce_or_i32_nonpow2(<7 x i32> %in) { } define i16 @reduce_xor_i16(<8 x i16> %in) { -; CHECK-SM80-LABEL: reduce_xor_i16( -; CHECK-SM80: { -; CHECK-SM80-NEXT: .reg .b16 %rs<4>; -; CHECK-SM80-NEXT: .reg .b32 %r<11>; -; CHECK-SM80-EMPTY: -; CHECK-SM80-NEXT: // %bb.0: -; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; -; CHECK-SM80-NEXT: xor.b32 %r5, %r2, %r4; -; CHECK-SM80-NEXT: xor.b32 %r6, %r1, %r3; -; CHECK-SM80-NEXT: xor.b32 %r7, %r6, %r5; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; CHECK-SM80-NEXT: // implicit-def: %rs2 -; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM80-NEXT: xor.b32 %r9, %r7, %r8; -; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; } -; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM80-NEXT: ret; -; -; CHECK-SM100-LABEL: reduce_xor_i16( -; CHECK-SM100: { -; CHECK-SM100-NEXT: .reg .b16 %rs<4>; -; CHECK-SM100-NEXT: .reg .b32 %r<11>; -; CHECK-SM100-EMPTY: -; CHECK-SM100-NEXT: // %bb.0: -; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; -; CHECK-SM100-NEXT: xor.b32 %r5, %r2, %r4; -; CHECK-SM100-NEXT: xor.b32 %r6, %r1, %r3; -; CHECK-SM100-NEXT: xor.b32 %r7, %r6, %r5; -; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7; -; CHECK-SM100-NEXT: // implicit-def: %rs2 -; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2}; -; CHECK-SM100-NEXT: xor.b32 %r9, %r7, %r8; 
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9; -; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3; -; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10; -; CHECK-SM100-NEXT: ret; +; CHECK-LABEL: reduce_xor_i16( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0]; +; CHECK-NEXT: xor.b32 %r5, %r2, %r4; +; CHECK-NEXT: xor.b32 %r6, %r1, %r3; +; CHECK-NEXT: xor.b32 %r7, %r6, %r5; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7; +; CHECK-NEXT: xor.b16 %rs3, %rs1, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r8, %rs3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; +; CHECK-NEXT: ret; %res = call i16 @llvm.vector.reduce.xor(<8 x i16> %in) ret i16 %res } @@ -2059,13 +2362,13 @@ define i32 @reduce_xor_i32(<8 x i32> %in) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16]; ; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0]; -; CHECK-NEXT: xor.b32 %r9, %r3, %r7; -; CHECK-NEXT: xor.b32 %r10, %r1, %r5; -; CHECK-NEXT: xor.b32 %r11, %r4, %r8; -; CHECK-NEXT: xor.b32 %r12, %r2, %r6; -; CHECK-NEXT: xor.b32 %r13, %r12, %r11; -; CHECK-NEXT: xor.b32 %r14, %r10, %r9; -; CHECK-NEXT: xor.b32 %r15, %r14, %r13; +; CHECK-NEXT: xor.b32 %r9, %r4, %r8; +; CHECK-NEXT: xor.b32 %r10, %r2, %r6; +; CHECK-NEXT: xor.b32 %r11, %r10, %r9; +; CHECK-NEXT: xor.b32 %r12, %r3, %r7; +; CHECK-NEXT: xor.b32 %r13, %r1, %r5; +; CHECK-NEXT: xor.b32 %r14, %r13, %r12; +; CHECK-NEXT: xor.b32 %r15, %r14, %r11; ; CHECK-NEXT: st.param.b32 [func_retval0], %r15; ; CHECK-NEXT: ret; %res = call i32 @llvm.vector.reduce.xor(<8 x i32> %in) diff --git a/llvm/test/CodeGen/NVPTX/sext-setcc.ll b/llvm/test/CodeGen/NVPTX/sext-setcc.ll index 9a67bdf..97918a6 100644 --- a/llvm/test/CodeGen/NVPTX/sext-setcc.ll +++ b/llvm/test/CodeGen/NVPTX/sext-setcc.ll @@ -29,7 +29,6 @@ define <4 x i8> @sext_setcc_v4i1_to_v4i8(ptr %p) { ; CHECK-LABEL: sext_setcc_v4i1_to_v4i8( ; CHECK: { ; CHECK-NEXT: .reg .pred %p<5>; -; CHECK-NEXT: .reg .b16 %rs<5>; ; CHECK-NEXT: .reg .b32 %r<13>; ; CHECK-NEXT: .reg .b64 %rd<2>; ; CHECK-EMPTY: @@ -37,17 +36,13 @@ define <4 x i8> @sext_setcc_v4i1_to_v4i8(ptr %p) { ; CHECK-NEXT: ld.param.b64 %rd1, [sext_setcc_v4i1_to_v4i8_param_0]; ; CHECK-NEXT: ld.b32 %r1, [%rd1]; ; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0x7770U; -; CHECK-NEXT: cvt.u16.u32 %rs1, %r2; -; CHECK-NEXT: setp.eq.b16 %p1, %rs1, 0; +; CHECK-NEXT: setp.eq.b32 %p1, %r2, 0; ; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x7771U; -; CHECK-NEXT: cvt.u16.u32 %rs2, %r3; -; CHECK-NEXT: setp.eq.b16 %p2, %rs2, 0; +; CHECK-NEXT: setp.eq.b32 %p2, %r3, 0; ; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0x7772U; -; CHECK-NEXT: cvt.u16.u32 %rs3, %r4; -; CHECK-NEXT: setp.eq.b16 %p3, %rs3, 0; +; CHECK-NEXT: setp.eq.b32 %p3, %r4, 0; ; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0x7773U; -; CHECK-NEXT: cvt.u16.u32 %rs4, %r5; -; CHECK-NEXT: setp.eq.b16 %p4, %rs4, 0; +; CHECK-NEXT: setp.eq.b32 %p4, %r5, 0; ; CHECK-NEXT: selp.b32 %r6, -1, 0, %p4; ; CHECK-NEXT: selp.b32 %r7, -1, 0, %p3; ; CHECK-NEXT: prmt.b32 %r8, %r7, %r6, 0x3340U; diff --git a/llvm/test/CodeGen/NVPTX/trunc-setcc.ll b/llvm/test/CodeGen/NVPTX/trunc-setcc.ll new file mode 100644 index 0000000..f22e37e --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/trunc-setcc.ll @@ -0,0 +1,269 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_50 | FileCheck %s +; RUN: %if ptxas %{ llc < %s -mcpu=sm_50 | %ptxas-verify 
-arch=sm_50 %} + +target triple = "nvptx64-nvidia-cuda" + +define i1 @trunc_nsw_singed_const(i32 %a) { +; CHECK-LABEL: trunc_nsw_singed_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_singed_const_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: setp.gt.s32 %p1, %r2, -1; +; CHECK-NEXT: selp.b32 %r3, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nsw i32 %a2 to i8 + %c = icmp sgt i8 %b, -1 + ret i1 %c +} + +define i1 @trunc_nuw_singed_const(i32 %a) { +; CHECK-LABEL: trunc_nuw_singed_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nuw_singed_const_param_0]; +; CHECK-NEXT: add.s16 %rs2, %rs1, 1; +; CHECK-NEXT: cvt.s16.s8 %rs3, %rs2; +; CHECK-NEXT: setp.lt.s16 %p1, %rs3, 100; +; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nuw i32 %a2 to i8 + %c = icmp slt i8 %b, 100 + ret i1 %c +} + +define i1 @trunc_nsw_unsinged_const(i32 %a) { +; CHECK-LABEL: trunc_nsw_unsinged_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nsw_unsinged_const_param_0]; +; CHECK-NEXT: add.s16 %rs2, %rs1, 1; +; CHECK-NEXT: and.b16 %rs3, %rs2, 255; +; CHECK-NEXT: setp.lt.u16 %p1, %rs3, 236; +; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nsw i32 %a2 to i8 + %c = icmp ult i8 %b, -20 + ret i1 %c +} + +define i1 @trunc_nuw_unsinged_const(i32 %a) { +; CHECK-LABEL: trunc_nuw_unsinged_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_unsinged_const_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: setp.gt.u32 %p1, %r2, 100; +; CHECK-NEXT: selp.b32 %r3, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nuw i32 %a2 to i8 + %c = icmp ugt i8 %b, 100 + ret i1 %c +} + + +define i1 @trunc_nsw_eq_const(i32 %a) { +; CHECK-LABEL: trunc_nsw_eq_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_eq_const_param_0]; +; CHECK-NEXT: setp.eq.b32 %p1, %r1, 99; +; CHECK-NEXT: selp.b32 %r2, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nsw i32 %a2 to i8 + %c = icmp eq i8 %b, 100 + ret i1 %c +} + +define i1 @trunc_nuw_eq_const(i32 %a) { +; CHECK-LABEL: trunc_nuw_eq_const( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_eq_const_param_0]; +; CHECK-NEXT: setp.eq.b32 %p1, %r1, 99; +; CHECK-NEXT: selp.b32 %r2, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-NEXT: ret; + %a2 = add i32 %a, 1 + %b = trunc nuw i32 %a2 to i8 + %c = icmp eq i8 %b, 100 + ret i1 %c +} + +;;; + +define i1 @trunc_nsw_singed(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nsw_singed( +; CHECK: { +; 
CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_singed_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: ld.param.b32 %r3, [trunc_nsw_singed_param_1]; +; CHECK-NEXT: add.s32 %r4, %r3, 7; +; CHECK-NEXT: setp.gt.s32 %p1, %r2, %r4; +; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 7 + %c1 = trunc nsw i32 %b1 to i8 + %c2 = trunc nsw i32 %b2 to i8 + %c = icmp sgt i8 %c1, %c2 + ret i1 %c +} + +define i1 @trunc_nuw_singed(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nuw_singed( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<7>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nuw_singed_param_0]; +; CHECK-NEXT: ld.param.b8 %rs2, [trunc_nuw_singed_param_1]; +; CHECK-NEXT: add.s16 %rs3, %rs1, 1; +; CHECK-NEXT: cvt.s16.s8 %rs4, %rs3; +; CHECK-NEXT: add.s16 %rs5, %rs2, 6; +; CHECK-NEXT: cvt.s16.s8 %rs6, %rs5; +; CHECK-NEXT: setp.lt.s16 %p1, %rs4, %rs6; +; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 6 + %c1 = trunc nuw i32 %b1 to i8 + %c2 = trunc nuw i32 %b2 to i8 + %c = icmp slt i8 %c1, %c2 + ret i1 %c +} + +define i1 @trunc_nsw_unsinged(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nsw_unsinged( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<7>; +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nsw_unsinged_param_0]; +; CHECK-NEXT: ld.param.b8 %rs2, [trunc_nsw_unsinged_param_1]; +; CHECK-NEXT: add.s16 %rs3, %rs1, 1; +; CHECK-NEXT: and.b16 %rs4, %rs3, 255; +; CHECK-NEXT: add.s16 %rs5, %rs2, 4; +; CHECK-NEXT: and.b16 %rs6, %rs5, 255; +; CHECK-NEXT: setp.lt.u16 %p1, %rs4, %rs6; +; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 4 + %c1 = trunc nsw i32 %b1 to i8 + %c2 = trunc nsw i32 %b2 to i8 + %c = icmp ult i8 %c1, %c2 + ret i1 %c +} + +define i1 @trunc_nuw_unsinged(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nuw_unsinged( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_unsinged_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: ld.param.b32 %r3, [trunc_nuw_unsinged_param_1]; +; CHECK-NEXT: add.s32 %r4, %r3, 5; +; CHECK-NEXT: setp.gt.u32 %p1, %r2, %r4; +; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 5 + %c1 = trunc nuw i32 %b1 to i8 + %c2 = trunc nuw i32 %b2 to i8 + %c = icmp ugt i8 %c1, %c2 + ret i1 %c +} + + +define i1 @trunc_nsw_eq(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nsw_eq( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_eq_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 1; +; CHECK-NEXT: ld.param.b32 %r3, [trunc_nsw_eq_param_1]; +; CHECK-NEXT: add.s32 %r4, %r3, 3; +; CHECK-NEXT: setp.eq.b32 %p1, %r2, %r4; +; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 1 + %b2 = add i32 %a2, 3 + %c1 = trunc 
nsw i32 %b1 to i8 + %c2 = trunc nsw i32 %b2 to i8 + %c = icmp eq i8 %c1, %c2 + ret i1 %c +} + +define i1 @trunc_nuw_eq(i32 %a1, i32 %a2) { +; CHECK-LABEL: trunc_nuw_eq( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_eq_param_0]; +; CHECK-NEXT: add.s32 %r2, %r1, 2; +; CHECK-NEXT: ld.param.b32 %r3, [trunc_nuw_eq_param_1]; +; CHECK-NEXT: add.s32 %r4, %r3, 1; +; CHECK-NEXT: setp.eq.b32 %p1, %r2, %r4; +; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; +; CHECK-NEXT: ret; + %b1 = add i32 %a1, 2 + %b2 = add i32 %a2, 1 + %c1 = trunc nuw i32 %b1 to i8 + %c2 = trunc nuw i32 %b2 to i8 + %c = icmp eq i8 %c1, %c2 + ret i1 %c +} diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll index 9ffb4fd..258ddf6 100644 --- a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll +++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll @@ -37,9 +37,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8 ; 32BIT: bb.0.entry: ; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6 ; 32BIT-NEXT: {{ $}} - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6 ; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3 ; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3 ; @@ -47,9 +47,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8 ; 64BIT: bb.0.entry: ; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6 ; 64BIT-NEXT: {{ $}} - ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 ; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3 ; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3 entry: @@ -96,9 +96,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3 ; 32BIT: bb.0.entry: ; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6 ; 32BIT-NEXT: {{ $}} - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5 - ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5 + ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6 ; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3 ; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3 ; @@ -106,9 
+106,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3 ; 64BIT: bb.0.entry: ; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6 ; 64BIT-NEXT: {{ $}} - ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 - ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5 + ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3 ; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3 ; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3 entry: diff --git a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll index 1863eaf..bfc7fbb 100644 --- a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll +++ b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll @@ -1,5 +1,5 @@ -; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s -; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s +; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s +; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s define ptr @nest_receiver(ptr nest %arg) nounwind { ret ptr %arg @@ -9,5 +9,10 @@ define ptr @nest_caller(ptr %arg) nounwind { %result = call ptr @nest_receiver(ptr nest %arg) ret ptr %result } +; CHECK-LABEL: .nest_receiver: +; CHECK: mr 3, 11 +; CHECK: blr -; CHECK: LLVM ERROR: Nest arguments are unimplemented. +; CHECK-LABEL: .nest_caller: +; CHECK: mr 11, 3 +; CHECK: bl .nest_receiver diff --git a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll index b71f6b5..19df220 100644 --- a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll +++ b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll @@ -1,7 +1,7 @@ -; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s -; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s - -; CHECK: LLVM ERROR: INIT_TRAMPOLINE operation is not supported on AIX. 
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | \ +; RUN: FileCheck %s --check-prefix=32BIT +; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 -mattr=-altivec | \ +; RUN: FileCheck %s --check-prefix=64BIT define void @create_trampoline(ptr %buffer, ptr %nval) nounwind { entry: @@ -12,3 +12,17 @@ entry: declare i32 @nested(i32); declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind + +; 32BIT: stw 4, 8(3) +; 32BIT: lwz [[FuncDesc:[0-9]+]], L..C0(2) +; 32BIT-DAG: lwz [[SCRATCH1:[0-9]+]], 0([[FuncDesc]]) +; 32BIT-DAG: lwz [[SCRATCH2:[0-9]+]], 4([[FuncDesc]]) +; 32BIT-DAG: stw [[SCRATCH1]], 0(3) +; 32BIT-DAG: stw [[SCRATCH2]], 4(3) + +; 64BIT: std 4, 16(3) +; 64BIT-DAG: ld [[FuncDesc:[0-9]+]], L..C0(2) +; 64BIT-DAG: ld [[SCRATCH1:[0-9]+]], 0([[FuncDesc]]) +; 64BIT-DAG: ld [[SCRATCH2:[0-9]+]], 8([[FuncDesc]]) +; 64BIT-DAG: std [[SCRATCH1]], 0(3) +; 64BIT-DAG: std [[SCRATCH2]], 8(3) diff --git a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll index 59173e2..d8e66d6 100644 --- a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll +++ b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \ ; RUN: < %s | FileCheck %s --check-prefix=POWERPC_64LE @@ -7,240 +8,90 @@ ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc-ibm-aix \ ; RUN: < %s | FileCheck %s --check-prefix=POWERPC_32 -define i32 @test_Greater_than(ptr %colauths, i32 signext %ncols) { -; This testcase is manually reduced to isolate the critical code blocks. -; It is designed to check for vector comparison specifically for zero vectors. -; In the vector.body section, we are expecting a comparison instruction (vcmpequh), -; merge instructions (vmrghh and vmrglh) which use exactly 2 vectors. -; The output of the merge instruction is being used by xxland and finally -; accumulated by vadduwm instruction. - +define i32 @test_Greater_than(ptr %colauths) { +; This testcase is for the special case of zero-vector comparisons. +; Currently the generated code does a comparison (vcmpequh) and then a negation (xxlnor). +; This pattern is expected to be optimized in a future patch. 
; POWERPC_64LE-LABEL: test_Greater_than: -; POWERPC_64LE: .LBB0_6: # %vector.body -; POWERPC_64LE-NEXT: # -; POWERPC_64LE-NEXT: lxv [[R1:[0-9]+]], -64(4) -; POWERPC_64LE-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_64LE-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_64LE-NEXT: vmrghh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_64LE-NEXT: vmrglh [[R2]], [[R2]], [[R2]] -; POWERPC_64LE-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_64LE-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_64LE-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_64LE: .LBB0_10: # %vec.epilog.vector.body -; POWERPC_64LE-NEXT: # -; POWERPC_64LE-NEXT: lxv [[R8:[0-9]+]], 0(4) -; POWERPC_64LE-NEXT: addi 4, 4, 16 -; POWERPC_64LE-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]] -; POWERPC_64LE-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_64LE-NEXT: vmrglh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_64LE-NEXT: vmrghh [[R9]], [[R9]], [[R9]] -; POWERPC_64LE-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_64LE-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_64LE-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_64LE-NEXT: vadduwm [[R3]], [[R3]], [[R11]] -; POWERPC_64LE-NEXT: bdnz .LBB0_10 -; POWERPC_64LE: blr +; POWERPC_64LE: # %bb.0: # %entry +; POWERPC_64LE-NEXT: lfd 0, 0(3) +; POWERPC_64LE-NEXT: xxlxor 35, 35, 35 +; POWERPC_64LE-NEXT: li 4, 0 +; POWERPC_64LE-NEXT: li 3, 4 +; POWERPC_64LE-NEXT: xxswapd 34, 0 +; POWERPC_64LE-NEXT: vcmpequh 2, 2, 3 +; POWERPC_64LE-NEXT: xxlnor 34, 34, 34 +; POWERPC_64LE-NEXT: vmrglh 3, 2, 2 +; POWERPC_64LE-NEXT: vextuwrx 4, 4, 2 +; POWERPC_64LE-NEXT: vextuwrx 3, 3, 3 +; POWERPC_64LE-NEXT: clrlwi 4, 4, 31 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 1, 30, 30 +; POWERPC_64LE-NEXT: mfvsrwz 3, 35 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 2, 29, 29 +; POWERPC_64LE-NEXT: li 3, 12 +; POWERPC_64LE-NEXT: vextuwrx 3, 3, 3 +; POWERPC_64LE-NEXT: rlwimi 4, 3, 3, 28, 28 +; POWERPC_64LE-NEXT: stb 4, -1(1) +; POWERPC_64LE-NEXT: lbz 3, -1(1) +; POWERPC_64LE-NEXT: popcntd 3, 3 +; POWERPC_64LE-NEXT: blr ; ; POWERPC_64-LABEL: test_Greater_than: -; POWERPC_64: L..BB0_6: # %vector.body -; POWERPC_64-NEXT: # -; POWERPC_64-NEXT: lxv [[R1:[0-9]+]], -64(4) -; POWERPC_64-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_64-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_64-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_64-NEXT: vmrghh [[R2]], [[R2]], [[R2]] -; POWERPC_64-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_64-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_64-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_64: L..BB0_10: # %vec.epilog.vector.body -; POWERPC_64-NEXT: # -; POWERPC_64-NEXT: lxv [[R8:[0-9]+]], 0(4) -; POWERPC_64-NEXT: addi 4, 4, 16 -; POWERPC_64-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]] -; POWERPC_64-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_64-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_64-NEXT: vmrglh [[R9]], [[R9]], [[R9]] -; POWERPC_64-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_64-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_64-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_64-NEXT: vadduwm [[R3]], [[R3]], [[R11]] -; POWERPC_64-NEXT: bdnz L..BB0_10 -; POWERPC_64: blr +; POWERPC_64: # %bb.0: # %entry +; POWERPC_64-NEXT: lxsd 2, 0(3) +; POWERPC_64-NEXT: xxlxor 35, 35, 35 +; POWERPC_64-NEXT: li 4, 12 +; POWERPC_64-NEXT: li 3, 8 +; POWERPC_64-NEXT: vcmpequh 2, 2, 3 +; POWERPC_64-NEXT: xxlnor 34, 34, 34 +; POWERPC_64-NEXT: vmrghh 2, 2, 2 +; POWERPC_64-NEXT: vextuwlx 4, 4, 2 +; 
POWERPC_64-NEXT: vextuwlx 3, 3, 2 +; POWERPC_64-NEXT: clrlwi 4, 4, 31 +; POWERPC_64-NEXT: rlwimi 4, 3, 1, 30, 30 +; POWERPC_64-NEXT: mfvsrwz 3, 34 +; POWERPC_64-NEXT: rlwimi 4, 3, 2, 29, 29 +; POWERPC_64-NEXT: li 3, 0 +; POWERPC_64-NEXT: vextuwlx 3, 3, 2 +; POWERPC_64-NEXT: rlwimi 4, 3, 3, 28, 28 +; POWERPC_64-NEXT: stb 4, -1(1) +; POWERPC_64-NEXT: lbz 3, -1(1) +; POWERPC_64-NEXT: popcntd 3, 3 +; POWERPC_64-NEXT: blr ; ; POWERPC_32-LABEL: test_Greater_than: -; POWERPC_32: L..BB0_7: # %vector.body -; POWERPC_32-NEXT: # -; POWERPC_32-NEXT: lxv [[R1:[0-9]+]], 0(10) -; POWERPC_32-NEXT: addic [[R13:[0-9]+]], [[R13]], 64 -; POWERPC_32-NEXT: addze [[R14:[0-9]+]], [[R14]] -; POWERPC_32-NEXT: xor [[R15:[0-9]+]], [[R13]], [[R16:[0-9]+]] -; POWERPC_32-NEXT: or. [[R15]], [[R15]], [[R14]] -; POWERPC_32-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]] -; POWERPC_32-NEXT: xxlnor [[R1]], [[R1]], [[R1]] -; POWERPC_32-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]] -; POWERPC_32-NEXT: vmrghh [[R2]], [[R2]], [[R2]] -; POWERPC_32-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]] -; POWERPC_32-NEXT: xxland [[R1]], [[R1]], [[R6]] -; POWERPC_32-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]] -; POWERPC_32: L..BB0_11: # %vec.epilog.vector.body -; POWERPC_32-NEXT: # -; POWERPC_32-NEXT: slwi [[R14]], [[R13]], 1 -; POWERPC_32-NEXT: addic [[R13]], [[R13]], 8 -; POWERPC_32-NEXT: addze [[R17:[0-9]+]], [[R17]] -; POWERPC_32-NEXT: lxvx [[R8:[0-9]+]], [[R18:[0-9]+]], [[R14]] -; POWERPC_32-NEXT: xor [[R14]], [[R13]], [[R16]] -; POWERPC_32-NEXT: or. [[R14]], [[R14]], [[R17]] -; POWERPC_32-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R3]] -; POWERPC_32-NEXT: xxlnor [[R8]], [[R8]], [[R8]] -; POWERPC_32-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]] -; POWERPC_32-NEXT: vmrglh [[R9]], [[R9]], [[R9]] -; POWERPC_32-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]] -; POWERPC_32-NEXT: xxland [[R8]], [[R8]], [[R6]] -; POWERPC_32-NEXT: vadduwm [[R7]], [[R7]], [[R9]] -; POWERPC_32-NEXT: vadduwm [[R19:[0-9]+]], [[R19]], [[R11]] -; POWERPC_32-NEXT: bne 0, L..BB0_11 -; POWERPC_32: blr - entry: - %cmp5 = icmp sgt i32 %ncols, 0 - br i1 %cmp5, label %iter.check, label %for.cond.cleanup - -iter.check: ; preds = %entry - %wide.trip.count = zext nneg i32 %ncols to i64 - %min.iters.check = icmp ult i32 %ncols, 8 - br i1 %min.iters.check, label %for.body.preheader, label %vector.main.loop.iter.check - -for.body.preheader: ; preds = %vec.epilog.iter.check, %vec.epilog.middle.block, %iter.check - %indvars.iv.ph = phi i64 [ 0, %iter.check ], [ %n.vec, %vec.epilog.iter.check ], [ %n.vec31, %vec.epilog.middle.block ] - %num_cols_needed.06.ph = phi i32 [ 0, %iter.check ], [ %33, %vec.epilog.iter.check ], [ %40, %vec.epilog.middle.block ] - br label %for.body - -vector.main.loop.iter.check: ; preds = %iter.check - %min.iters.check9 = icmp ult i32 %ncols, 64 - br i1 %min.iters.check9, label %vec.epilog.ph, label %vector.ph - -vector.ph: ; preds = %vector.main.loop.iter.check - %n.vec = and i64 %wide.trip.count, 2147483584 - br label %vector.body - -vector.body: ; preds = %vector.body, %vector.ph - %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] - %vec.phi = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %24, %vector.body ] - %vec.phi10 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %25, %vector.body ] - %vec.phi11 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %26, %vector.body ] - %vec.phi12 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %27, %vector.body ] - %vec.phi13 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %28, %vector.body 
] - %vec.phi14 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %29, %vector.body ] - %vec.phi15 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %30, %vector.body ] - %vec.phi16 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %31, %vector.body ] - %0 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index - %1 = getelementptr inbounds nuw i8, ptr %0, i64 16 - %2 = getelementptr inbounds nuw i8, ptr %0, i64 32 - %3 = getelementptr inbounds nuw i8, ptr %0, i64 48 - %4 = getelementptr inbounds nuw i8, ptr %0, i64 64 - %5 = getelementptr inbounds nuw i8, ptr %0, i64 80 - %6 = getelementptr inbounds nuw i8, ptr %0, i64 96 - %7 = getelementptr inbounds nuw i8, ptr %0, i64 112 - %wide.load = load <8 x i16>, ptr %0, align 2, !tbaa !5 - %wide.load17 = load <8 x i16>, ptr %1, align 2, !tbaa !5 - %wide.load18 = load <8 x i16>, ptr %2, align 2, !tbaa !5 - %wide.load19 = load <8 x i16>, ptr %3, align 2, !tbaa !5 - %wide.load20 = load <8 x i16>, ptr %4, align 2, !tbaa !5 - %wide.load21 = load <8 x i16>, ptr %5, align 2, !tbaa !5 - %wide.load22 = load <8 x i16>, ptr %6, align 2, !tbaa !5 - %wide.load23 = load <8 x i16>, ptr %7, align 2, !tbaa !5 - %8 = icmp ne <8 x i16> %wide.load, zeroinitializer - %9 = icmp ne <8 x i16> %wide.load17, zeroinitializer - %10 = icmp ne <8 x i16> %wide.load18, zeroinitializer - %11 = icmp ne <8 x i16> %wide.load19, zeroinitializer - %12 = icmp ne <8 x i16> %wide.load20, zeroinitializer - %13 = icmp ne <8 x i16> %wide.load21, zeroinitializer - %14 = icmp ne <8 x i16> %wide.load22, zeroinitializer - %15 = icmp ne <8 x i16> %wide.load23, zeroinitializer - %16 = zext <8 x i1> %8 to <8 x i32> - %17 = zext <8 x i1> %9 to <8 x i32> - %18 = zext <8 x i1> %10 to <8 x i32> - %19 = zext <8 x i1> %11 to <8 x i32> - %20 = zext <8 x i1> %12 to <8 x i32> - %21 = zext <8 x i1> %13 to <8 x i32> - %22 = zext <8 x i1> %14 to <8 x i32> - %23 = zext <8 x i1> %15 to <8 x i32> - %24 = add <8 x i32> %vec.phi, %16 - %25 = add <8 x i32> %vec.phi10, %17 - %26 = add <8 x i32> %vec.phi11, %18 - %27 = add <8 x i32> %vec.phi12, %19 - %28 = add <8 x i32> %vec.phi13, %20 - %29 = add <8 x i32> %vec.phi14, %21 - %30 = add <8 x i32> %vec.phi15, %22 - %31 = add <8 x i32> %vec.phi16, %23 - %index.next = add nuw i64 %index, 64 - %32 = icmp eq i64 %index.next, %n.vec - br i1 %32, label %middle.block, label %vector.body, !llvm.loop !9 - -middle.block: ; preds = %vector.body - %bin.rdx = add <8 x i32> %25, %24 - %bin.rdx24 = add <8 x i32> %26, %bin.rdx - %bin.rdx25 = add <8 x i32> %27, %bin.rdx24 - %bin.rdx26 = add <8 x i32> %28, %bin.rdx25 - %bin.rdx27 = add <8 x i32> %29, %bin.rdx26 - %bin.rdx28 = add <8 x i32> %30, %bin.rdx27 - %bin.rdx29 = add <8 x i32> %31, %bin.rdx28 - %33 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %bin.rdx29) - %cmp.n = icmp eq i64 %n.vec, %wide.trip.count - br i1 %cmp.n, label %for.cond.cleanup, label %vec.epilog.iter.check - -vec.epilog.iter.check: ; preds = %middle.block - %n.vec.remaining = and i64 %wide.trip.count, 56 - %min.epilog.iters.check = icmp eq i64 %n.vec.remaining, 0 - br i1 %min.epilog.iters.check, label %for.body.preheader, label %vec.epilog.ph - -vec.epilog.ph: ; preds = %vec.epilog.iter.check, %vector.main.loop.iter.check - %vec.epilog.resume.val = phi i64 [ %n.vec, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ] - %bc.merge.rdx = phi i32 [ %33, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ] - %n.vec31 = and i64 %wide.trip.count, 2147483640 - %34 = insertelement <8 x i32> <i32 poison, i32 0, i32 0, i32 0, i32 0, i32 0, 
i32 0, i32 0>, i32 %bc.merge.rdx, i64 0 - br label %vec.epilog.vector.body - -vec.epilog.vector.body: ; preds = %vec.epilog.vector.body, %vec.epilog.ph - %index32 = phi i64 [ %vec.epilog.resume.val, %vec.epilog.ph ], [ %index.next35, %vec.epilog.vector.body ] - %vec.phi33 = phi <8 x i32> [ %34, %vec.epilog.ph ], [ %38, %vec.epilog.vector.body ] - %35 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index32 - %wide.load34 = load <8 x i16>, ptr %35, align 2, !tbaa !5 - %36 = icmp ne <8 x i16> %wide.load34, zeroinitializer - %37 = zext <8 x i1> %36 to <8 x i32> - %38 = add <8 x i32> %vec.phi33, %37 - %index.next35 = add nuw i64 %index32, 8 - %39 = icmp eq i64 %index.next35, %n.vec31 - br i1 %39, label %vec.epilog.middle.block, label %vec.epilog.vector.body, !llvm.loop !13 - -vec.epilog.middle.block: ; preds = %vec.epilog.vector.body - %40 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %38) - %cmp.n36 = icmp eq i64 %n.vec31, %wide.trip.count - br i1 %cmp.n36, label %for.cond.cleanup, label %for.body.preheader - -for.cond.cleanup: ; preds = %for.body, %middle.block, %vec.epilog.middle.block, %entry - %num_cols_needed.0.lcssa = phi i32 [ 0, %entry ], [ %33, %middle.block ], [ %40, %vec.epilog.middle.block ], [ %spec.select, %for.body ] - ret i32 %num_cols_needed.0.lcssa - -for.body: ; preds = %for.body.preheader, %for.body - %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ] - %num_cols_needed.06 = phi i32 [ %spec.select, %for.body ], [ %num_cols_needed.06.ph, %for.body.preheader ] - %arrayidx = getelementptr inbounds nuw i16, ptr %colauths, i64 %indvars.iv - %41 = load i16, ptr %arrayidx, align 2, !tbaa !5 - %tobool.not = icmp ne i16 %41, 0 - %inc = zext i1 %tobool.not to i32 - %spec.select = add nuw nsw i32 %num_cols_needed.06, %inc - %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 - %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count - br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !14 +; POWERPC_32: # %bb.0: # %entry +; POWERPC_32-NEXT: li 4, 4 +; POWERPC_32-NEXT: lxvwsx 1, 0, 3 +; POWERPC_32-NEXT: xxlxor 35, 35, 35 +; POWERPC_32-NEXT: lxvwsx 0, 3, 4 +; POWERPC_32-NEXT: xxmrghw 34, 1, 0 +; POWERPC_32-NEXT: vcmpequh 2, 2, 3 +; POWERPC_32-NEXT: xxlnor 34, 34, 34 +; POWERPC_32-NEXT: vmrghh 2, 2, 2 +; POWERPC_32-NEXT: stxv 34, -32(1) +; POWERPC_32-NEXT: lwz 3, -20(1) +; POWERPC_32-NEXT: lwz 4, -24(1) +; POWERPC_32-NEXT: clrlwi 3, 3, 31 +; POWERPC_32-NEXT: rlwimi 3, 4, 1, 30, 30 +; POWERPC_32-NEXT: lwz 4, -28(1) +; POWERPC_32-NEXT: rlwimi 3, 4, 2, 29, 29 +; POWERPC_32-NEXT: lwz 4, -32(1) +; POWERPC_32-NEXT: rlwimi 3, 4, 3, 28, 28 +; POWERPC_32-NEXT: popcntw 3, 3 +; POWERPC_32-NEXT: blr +entry: + %0 = load <4 x i16>, ptr %colauths, align 2, !tbaa !5 + %1 = icmp ne <4 x i16> %0, zeroinitializer + %2 = bitcast <4 x i1> %1 to i4 + %3 = tail call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 %2) + %4 = zext nneg i4 %3 to i32 + ret i32 %4 } +declare i4 @llvm.ctpop.i4(i4) #1 + !5 = !{!6, !6, i64 0} !6 = !{!"short", !7, i64 0} !7 = !{!"omnipotent char", !8, i64 0} !8 = !{!"Simple C/C++ TBAA"} -!9 = distinct !{!9, !10, !11, !12} -!10 = !{!"llvm.loop.mustprogress"} -!11 = !{!"llvm.loop.isvectorized", i32 1} -!12 = !{!"llvm.loop.unroll.runtime.disable"} -!13 = distinct !{!13, !10, !11, !12} -!14 = distinct !{!14, !10, !12, !11} diff --git a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll index 232014d..a9503f7 100644 --- a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll +++ 
b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll @@ -2,22 +2,87 @@ ; Verify whether the generated assembly for the following function includes the mtvsrbmi instruction. ; vector unsigned char v00FF() ; { -; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 }; -; return x; +; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 }; +; return x; +; } +; vector unsigned short short00FF() +; { +; vector unsigned short x = { 0xFF, 0,0,0, 0,0,0,0}; +; return x; +; } +; vector unsigned int int00FF() +; { +; vector unsigned int x = { 0xFF, 0,0,0}; +; return x; +; } +; vector unsigned long long longlong00FF() +; { +; vector unsigned long long x = { 0xFF, 0}; +; return x; ; } ; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc-ibm-aix -mcpu=pwr10 -verify-machineinstrs \ -; RUN: | FileCheck %s --check-prefix=CHECK +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-BE + +; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr10 -verify-machineinstrs \ +; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-LE + +; CHECK-NOT: .byte 255 +; CHECK-NOT: .byte 0 define dso_local noundef range(i8 -1, 1) <16 x i8> @_Z5v00FFv() { -; CHECK-NOT: L..CPI0_0: -; CHECK-NOT: .byte 255 # 0xff -; CHECK-NOT: .byte 0 # 0x0 - -; CHECK-LABEL: _Z5v00FFv: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: mtvsrbmi v2, 1 -; CHECK-NEXT: blr +; CHECK-BE-LABEL: _Z5v00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 32768 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z5v00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr + entry: ret <16 x i8> <i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0> } + +define dso_local noundef range(i16 0, 256) <8 x i16> @_Z9short00FFv() { +; CHECK-BE-LABEL: _Z9short00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 16384 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z9short00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <8 x i16> <i16 255, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0> +} + +define dso_local noundef range(i32 0, 256) <4 x i32> @_Z7int00FFv() { +; CHECK-BE-LABEL: _Z7int00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 4096 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z7int00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <4 x i32> <i32 255, i32 0, i32 0, i32 0> +} + +define dso_local noundef range(i64 0, 256) <2 x i64> @_Z12longlong00FFv() { +; CHECK-BE-LABEL: _Z12longlong00FFv: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: mtvsrbmi v2, 256 +; CHECK-BE-NEXT: blr +; +; CHECK-LE-LABEL: _Z12longlong00FFv: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: mtvsrbmi v2, 1 +; CHECK-LE-NEXT: blr +entry: + ret <2 x i64> <i64 255, i64 0> +} diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll index b94665b..fb53921 100644 --- a/llvm/test/CodeGen/RISCV/features-info.ll +++ b/llvm/test/CodeGen/RISCV/features-info.ll @@ -6,13 +6,21 @@ ; CHECK-NEXT: 32bit - Implements RV32. ; CHECK-NEXT: 64bit - Implements RV64. ; CHECK-NEXT: a - 'A' (Atomic Instructions). +; CHECK-NEXT: add-load-fusion - Enable ADD(.UW) + load macrofusion. +; CHECK-NEXT: addi-load-fusion - Enable ADDI + load macrofusion. ; CHECK-NEXT: andes45 - Andes 45-Series processors. ; CHECK-NEXT: auipc-addi-fusion - Enable AUIPC+ADDI macrofusion. 
+; CHECK-NEXT: auipc-load-fusion - Enable AUIPC + load macrofusion. ; CHECK-NEXT: b - 'B' (the collection of the Zba, Zbb, Zbs extensions). +; CHECK-NEXT: bfext-fusion - Enable SLLI+SRLI (bitfield extract) macrofusion. ; CHECK-NEXT: c - 'C' (Compressed Instructions). ; CHECK-NEXT: conditional-cmv-fusion - Enable branch+c.mv fusion. ; CHECK-NEXT: d - 'D' (Double-Precision Floating-Point). ; CHECK-NEXT: disable-latency-sched-heuristic - Disable latency scheduling heuristic. +; CHECK-NEXT: disable-misched-load-clustering - Disable load clustering in the machine scheduler. +; CHECK-NEXT: disable-misched-store-clustering - Disable store clustering in the machine scheduler. +; CHECK-NEXT: disable-postmisched-load-clustering - Disable PostRA load clustering in the machine scheduler. +; CHECK-NEXT: disable-postmisched-store-clustering - Disable PostRA store clustering in the machine scheduler. ; CHECK-NEXT: dlen-factor-2 - Vector unit DLEN(data path width) is half of VLEN. ; CHECK-NEXT: e - 'E' (Embedded Instruction Set with 16 GPRs). ; CHECK-NEXT: exact-asm - Enable Exact Assembly (Disables Compression and Relaxation). @@ -58,6 +66,7 @@ ; CHECK-NEXT: ld-add-fusion - Enable LD+ADD macrofusion. ; CHECK-NEXT: log-vrgather - Has vrgather.vv with LMUL*log2(LMUL) latency ; CHECK-NEXT: lui-addi-fusion - Enable LUI+ADDI macro fusion. +; CHECK-NEXT: lui-load-fusion - Enable LUI + load macrofusion. ; CHECK-NEXT: m - 'M' (Integer Multiplication and Division). ; CHECK-NEXT: mips-p8700 - MIPS p8700 processor. ; CHECK-NEXT: no-default-unroll - Disable default unroll preference.. @@ -130,6 +139,7 @@ ; CHECK-NEXT: shvsatpa - 'Shvsatpa' (vsatp supports all modes supported by satp). ; CHECK-NEXT: shvstvala - 'Shvstvala' (vstval provides all needed values). ; CHECK-NEXT: shvstvecd - 'Shvstvecd' (vstvec supports Direct mode). +; CHECK-NEXT: shxadd-load-fusion - Enable SH(1|2|3)ADD(.UW) + load macrofusion. ; CHECK-NEXT: sifive7 - SiFive 7-Series processors. ; CHECK-NEXT: smaia - 'Smaia' (Advanced Interrupt Architecture Machine Level). ; CHECK-NEXT: smcdeleg - 'Smcdeleg' (Counter Delegation Machine Level). 
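The fusion entries added to the feature listing above are ordinary RISC-V subtarget features, so they can be switched on per llc invocation with -mattr, exactly as the macro-fusions.mir RUN lines further below do. A minimal sketch of such an invocation (the feature spellings are taken from the listing above; the input and output file names are placeholders):

llc -mtriple=riscv64-linux-gnu \
    -mattr=+zba,+add-load-fusion,+lui-load-fusion,+shxadd-load-fusion \
    input.ll -o input.s    # input.ll / input.s are placeholder names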
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll index facb544..0c152e6 100644 --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -2262,12 +2262,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IZHINX-NEXT: addi a2, a3, -1 ; RV32IZHINX-NEXT: .LBB10_4: # %start ; RV32IZHINX-NEXT: feq.s a3, s0, s0 -; RV32IZHINX-NEXT: neg a4, a1 -; RV32IZHINX-NEXT: neg a1, s1 +; RV32IZHINX-NEXT: neg a4, s1 +; RV32IZHINX-NEXT: neg a5, a1 ; RV32IZHINX-NEXT: neg a3, a3 -; RV32IZHINX-NEXT: and a0, a1, a0 +; RV32IZHINX-NEXT: and a0, a4, a0 ; RV32IZHINX-NEXT: and a1, a3, a2 -; RV32IZHINX-NEXT: or a0, a4, a0 +; RV32IZHINX-NEXT: or a0, a5, a0 ; RV32IZHINX-NEXT: and a0, a3, a0 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2309,12 +2309,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IZDINXZHINX-NEXT: addi a2, a3, -1 ; RV32IZDINXZHINX-NEXT: .LBB10_4: # %start ; RV32IZDINXZHINX-NEXT: feq.s a3, s0, s0 -; RV32IZDINXZHINX-NEXT: neg a4, a1 -; RV32IZDINXZHINX-NEXT: neg a1, s1 +; RV32IZDINXZHINX-NEXT: neg a4, s1 +; RV32IZDINXZHINX-NEXT: neg a5, a1 ; RV32IZDINXZHINX-NEXT: neg a3, a3 -; RV32IZDINXZHINX-NEXT: and a0, a1, a0 +; RV32IZDINXZHINX-NEXT: and a0, a4, a0 ; RV32IZDINXZHINX-NEXT: and a1, a3, a2 -; RV32IZDINXZHINX-NEXT: or a0, a4, a0 +; RV32IZDINXZHINX-NEXT: or a0, a5, a0 ; RV32IZDINXZHINX-NEXT: and a0, a3, a0 ; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2653,12 +2653,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; CHECK32-IZHINXMIN-NEXT: addi a2, a3, -1 ; CHECK32-IZHINXMIN-NEXT: .LBB10_4: # %start ; CHECK32-IZHINXMIN-NEXT: feq.s a3, s0, s0 -; CHECK32-IZHINXMIN-NEXT: neg a4, a1 -; CHECK32-IZHINXMIN-NEXT: neg a1, s1 +; CHECK32-IZHINXMIN-NEXT: neg a4, s1 +; CHECK32-IZHINXMIN-NEXT: neg a5, a1 ; CHECK32-IZHINXMIN-NEXT: neg a3, a3 -; CHECK32-IZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZHINXMIN-NEXT: and a0, a4, a0 ; CHECK32-IZHINXMIN-NEXT: and a1, a3, a2 -; CHECK32-IZHINXMIN-NEXT: or a0, a4, a0 +; CHECK32-IZHINXMIN-NEXT: or a0, a5, a0 ; CHECK32-IZHINXMIN-NEXT: and a0, a3, a0 ; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2701,12 +2701,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; CHECK32-IZDINXZHINXMIN-NEXT: addi a2, a3, -1 ; CHECK32-IZDINXZHINXMIN-NEXT: .LBB10_4: # %start ; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a3, s0, s0 -; CHECK32-IZDINXZHINXMIN-NEXT: neg a4, a1 -; CHECK32-IZDINXZHINXMIN-NEXT: neg a1, s1 +; CHECK32-IZDINXZHINXMIN-NEXT: neg a4, s1 +; CHECK32-IZDINXZHINXMIN-NEXT: neg a5, a1 ; CHECK32-IZDINXZHINXMIN-NEXT: neg a3, a3 -; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a4, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: and a1, a3, a2 -; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a4, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a5, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a3, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2972,18 +2972,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; RV32IZHINX-NEXT: fcvt.s.h a0, a0 -; RV32IZHINX-NEXT: lui a1, 391168 -; RV32IZHINX-NEXT: addi a1, a1, -1 -; 
RV32IZHINX-NEXT: fle.s a2, zero, a0 -; RV32IZHINX-NEXT: flt.s a1, a1, a0 -; RV32IZHINX-NEXT: neg s0, a1 -; RV32IZHINX-NEXT: neg s1, a2 +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 +; RV32IZHINX-NEXT: mv a0, s0 ; RV32IZHINX-NEXT: call __fixunssfdi ; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: lui a2, 391168 ; RV32IZHINX-NEXT: and a1, s1, a1 -; RV32IZHINX-NEXT: or a0, s0, a0 -; RV32IZHINX-NEXT: or a1, s0, a1 +; RV32IZHINX-NEXT: addi a2, a2, -1 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: or a1, a2, a1 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -3005,18 +3006,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZDINXZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IZDINXZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 -; RV32IZDINXZHINX-NEXT: lui a1, 391168 -; RV32IZDINXZHINX-NEXT: addi a1, a1, -1 -; RV32IZDINXZHINX-NEXT: fle.s a2, zero, a0 -; RV32IZDINXZHINX-NEXT: flt.s a1, a1, a0 -; RV32IZDINXZHINX-NEXT: neg s0, a1 -; RV32IZDINXZHINX-NEXT: neg s1, a2 +; RV32IZDINXZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZDINXZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZDINXZHINX-NEXT: neg s1, a0 +; RV32IZDINXZHINX-NEXT: mv a0, s0 ; RV32IZDINXZHINX-NEXT: call __fixunssfdi ; RV32IZDINXZHINX-NEXT: and a0, s1, a0 +; RV32IZDINXZHINX-NEXT: lui a2, 391168 ; RV32IZDINXZHINX-NEXT: and a1, s1, a1 -; RV32IZDINXZHINX-NEXT: or a0, s0, a0 -; RV32IZDINXZHINX-NEXT: or a1, s0, a1 +; RV32IZDINXZHINX-NEXT: addi a2, a2, -1 +; RV32IZDINXZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZDINXZHINX-NEXT: neg a2, a2 +; RV32IZDINXZHINX-NEXT: or a0, a2, a0 +; RV32IZDINXZHINX-NEXT: or a1, a2, a1 ; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZDINXZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -3217,18 +3219,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; CHECK32-IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; CHECK32-IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 -; CHECK32-IZHINXMIN-NEXT: lui a1, 391168 -; CHECK32-IZHINXMIN-NEXT: addi a1, a1, -1 -; CHECK32-IZHINXMIN-NEXT: fle.s a2, zero, a0 -; CHECK32-IZHINXMIN-NEXT: flt.s a1, a1, a0 -; CHECK32-IZHINXMIN-NEXT: neg s0, a1 -; CHECK32-IZHINXMIN-NEXT: neg s1, a2 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; CHECK32-IZHINXMIN-NEXT: fle.s a0, zero, s0 +; CHECK32-IZHINXMIN-NEXT: neg s1, a0 +; CHECK32-IZHINXMIN-NEXT: mv a0, s0 ; CHECK32-IZHINXMIN-NEXT: call __fixunssfdi ; CHECK32-IZHINXMIN-NEXT: and a0, s1, a0 +; CHECK32-IZHINXMIN-NEXT: lui a2, 391168 ; CHECK32-IZHINXMIN-NEXT: and a1, s1, a1 -; CHECK32-IZHINXMIN-NEXT: or a0, s0, a0 -; CHECK32-IZHINXMIN-NEXT: or a1, s0, a1 +; CHECK32-IZHINXMIN-NEXT: addi a2, a2, -1 +; CHECK32-IZHINXMIN-NEXT: flt.s a2, a2, s0 +; CHECK32-IZHINXMIN-NEXT: neg a2, a2 +; CHECK32-IZHINXMIN-NEXT: or a0, a2, a0 +; CHECK32-IZHINXMIN-NEXT: or a1, a2, a1 ; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; CHECK32-IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -3251,18 +3254,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; 
CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; CHECK32-IZDINXZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; CHECK32-IZDINXZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, 391168 -; CHECK32-IZDINXZHINXMIN-NEXT: addi a1, a1, -1 -; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a2, zero, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a1, a1, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: neg s0, a1 -; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h s0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a0, zero, s0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: mv a0, s0 ; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunssfdi ; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, 391168 ; CHECK32-IZDINXZHINXMIN-NEXT: and a1, s1, a1 -; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s0, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: or a1, s0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: addi a2, a2, -1 +; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a2, a2, s0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg a2, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a2, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: or a1, a2, a1 ; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; CHECK32-IZDINXZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/macro-fusions.mir b/llvm/test/CodeGen/RISCV/macro-fusions.mir index 1346414..ae5b52d 100644 --- a/llvm/test/CodeGen/RISCV/macro-fusions.mir +++ b/llvm/test/CodeGen/RISCV/macro-fusions.mir @@ -2,7 +2,12 @@ # RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \ # RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \ # RUN: -mattr=+lui-addi-fusion,+auipc-addi-fusion,+zexth-fusion,+zextw-fusion,+shifted-zextw-fusion,+ld-add-fusion \ +# RUN: -mattr=+add-load-fusion,+auipc-load-fusion,+lui-load-fusion,+addi-load-fusion \ +# RUN: -mattr=+zba,+shxadd-load-fusion \ # RUN: | FileCheck %s +# RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \ +# RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \ +# RUN: -mattr=+zba,+bfext-fusion | FileCheck --check-prefixes=CHECK-BFEXT %s # CHECK: lui_addi:%bb.0 # CHECK: Macro fuse: {{.*}}LUI - ADDI @@ -174,3 +179,1374 @@ body: | $x11 = COPY %5 PseudoRET ... + +# CHECK: add_lb +# CHECK: Macro fuse: {{.*}}ADD - LB +--- +name: add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lh +# CHECK: Macro fuse: {{.*}}ADD - LH +--- +name: add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lw +# CHECK: Macro fuse: {{.*}}ADD - LW +--- +name: add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: add_lbu +# CHECK: Macro fuse: {{.*}}ADD - LBU +--- +name: add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lhu +# CHECK: Macro fuse: {{.*}}ADD - LHU +--- +name: add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: add_lwu +# CHECK: Macro fuse: {{.*}}ADD - LWU +--- +name: add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: auipc_lb +# CHECK: Macro fuse: {{.*}}AUIPC - LB +--- +name: auipc_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LB %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lh +# CHECK: Macro fuse: {{.*}}AUIPC - LH +--- +name: auipc_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LH %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lw +# CHECK: Macro fuse: {{.*}}AUIPC - LW +--- +name: auipc_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LW %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_ld +# CHECK: Macro fuse: {{.*}}AUIPC - LD +--- +name: auipc_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LD %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lbu +# CHECK: Macro fuse: {{.*}}AUIPC - LBU +--- +name: auipc_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LBU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lhu +# CHECK: Macro fuse: {{.*}}AUIPC - LHU +--- +name: auipc_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LHU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: auipc_lwu +# CHECK: Macro fuse: {{.*}}AUIPC - LWU +--- +name: auipc_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = AUIPC 1 + %3:gpr = XORI %1, 2 + %4:gpr = LWU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lb +# CHECK: Macro fuse: {{.*}}LUI - LB +--- +name: lui_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LB %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lh +# CHECK: Macro fuse: {{.*}}LUI - LH +--- +name: lui_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LH %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... 
+ +# CHECK: lui_lw +# CHECK: Macro fuse: {{.*}}LUI - LW +--- +name: lui_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LW %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_ld +# CHECK: Macro fuse: {{.*}}LUI - LD +--- +name: lui_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LD %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lbu +# CHECK: Macro fuse: {{.*}}LUI - LBU +--- +name: lui_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LBU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lhu +# CHECK: Macro fuse: {{.*}}LUI - LHU +--- +name: lui_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LHU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: lui_lwu +# CHECK: Macro fuse: {{.*}}LUI - LWU +--- +name: lui_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = LUI 1 + %3:gpr = XORI %1, 2 + %4:gpr = LWU %2, 4 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK-BFEXT: bitfield_extract +# CHECK-BFEXT: Macro fuse: {{.*}}SLLI - SRLI +--- +name: bitfield_extract +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10 + %1:gpr = COPY $x10 + %2:gpr = SLLI %1, 31 + %3:gpr = XORI %1, 3 + %4:gpr = SRLI %2, 48 + $x10 = COPY %3 + $x11 = COPY %4 + PseudoRET +... + +# CHECK: addi_lb +# CHECK: Macro fuse: {{.*}}ADDI - LB +--- +name: addi_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lh +# CHECK: Macro fuse: {{.*}}ADDI - LH +--- +name: addi_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lw +# CHECK: Macro fuse: {{.*}}ADDI - LW +--- +name: addi_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_ld +# CHECK: Macro fuse: {{.*}}ADDI - LD +--- +name: addi_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lbu +# CHECK: Macro fuse: {{.*}}ADDI - LBU +--- +name: addi_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: addi_lhu +# CHECK: Macro fuse: {{.*}}ADDI - LHU +--- +name: addi_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: addi_lwu +# CHECK: Macro fuse: {{.*}}ADDI - LWU +--- +name: addi_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADDI %1, 8 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lb +# CHECK: Macro fuse: {{.*}}ADD_UW - LB +--- +name: adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lh +# CHECK: Macro fuse: {{.*}}ADD_UW - LH +--- +name: adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lw +# CHECK: Macro fuse: {{.*}}ADD_UW - LW +--- +name: adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_ld +# CHECK: Macro fuse: {{.*}}ADD_UW - LD +--- +name: adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lbu +# CHECK: Macro fuse: {{.*}}ADD_UW - LBU +--- +name: adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lhu +# CHECK: Macro fuse: {{.*}}ADD_UW - LHU +--- +name: adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: adduw_lwu +# CHECK: Macro fuse: {{.*}}ADD_UW - LWU +--- +name: adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 0 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lb +# CHECK: Macro fuse: {{.*}}SH1ADD - LB +--- +name: sh1add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lb +# CHECK: Macro fuse: {{.*}}SH2ADD - LB +--- +name: sh2add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lb +# CHECK: Macro fuse: {{.*}}SH3ADD - LB +--- +name: sh3add_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh1add_lh +# CHECK: Macro fuse: {{.*}}SH1ADD - LH +--- +name: sh1add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lh +# CHECK: Macro fuse: {{.*}}SH2ADD - LH +--- +name: sh2add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lh +# CHECK: Macro fuse: {{.*}}SH3ADD - LH +--- +name: sh3add_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lw +# CHECK: Macro fuse: {{.*}}SH1ADD - LW +--- +name: sh1add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lw +# CHECK: Macro fuse: {{.*}}SH2ADD - LW +--- +name: sh2add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lw +# CHECK: Macro fuse: {{.*}}SH3ADD - LW +--- +name: sh3add_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_ld +# CHECK: Macro fuse: {{.*}}SH1ADD - LD +--- +name: sh1add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_ld +# CHECK: Macro fuse: {{.*}}SH2ADD - LD +--- +name: sh2add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_ld +# CHECK: Macro fuse: {{.*}}SH3ADD - LD +--- +name: sh3add_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lbu +# CHECK: Macro fuse: {{.*}}SH1ADD - LBU +--- +name: sh1add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lbu +# CHECK: Macro fuse: {{.*}}SH2ADD - LBU +--- +name: sh2add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh3add_lbu +# CHECK: Macro fuse: {{.*}}SH3ADD - LBU +--- +name: sh3add_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lhu +# CHECK: Macro fuse: {{.*}}SH1ADD - LHU +--- +name: sh1add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lhu +# CHECK: Macro fuse: {{.*}}SH2ADD - LHU +--- +name: sh2add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lhu +# CHECK: Macro fuse: {{.*}}SH3ADD - LHU +--- +name: sh3add_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1add_lwu +# CHECK: Macro fuse: {{.*}}SH1ADD - LWU +--- +name: sh1add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2add_lwu +# CHECK: Macro fuse: {{.*}}SH2ADD - LWU +--- +name: sh2add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3add_lwu +# CHECK: Macro fuse: {{.*}}SH3ADD - LWU +--- +name: sh3add_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lb +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LB +--- +name: sh1adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lb +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LB +--- +name: sh2adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lb +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LB +--- +name: sh3adduw_lb +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LB %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lh +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LH +--- +name: sh1adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh2adduw_lh +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LH +--- +name: sh2adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lh +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LH +--- +name: sh3adduw_lh +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LH %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lw +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LW +--- +name: sh1adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lw +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LW +--- +name: sh2adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lw +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LW +--- +name: sh3adduw_lw +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LW %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_ld +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LD +--- +name: sh1adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_ld +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LD +--- +name: sh2adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_ld +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LD +--- +name: sh3adduw_ld +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LD %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lbu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LBU +--- +name: sh1adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lbu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LBU +--- +name: sh2adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
+ +# CHECK: sh3adduw_lbu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LBU +--- +name: sh3adduw_lbu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LBU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lhu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LHU +--- +name: sh1adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lhu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LHU +--- +name: sh2adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lhu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LHU +--- +name: sh3adduw_lhu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LHU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh1adduw_lwu +# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LWU +--- +name: sh1adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH1ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh2adduw_lwu +# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LWU +--- +name: sh2adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH2ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... + +# CHECK: sh3adduw_lwu +# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LWU +--- +name: sh3adduw_lwu +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + %1:gpr = COPY $x10 + %2:gpr = COPY $x11 + %3:gpr = SH3ADD_UW %1, %2 + %4:gpr = XORI %2, 3 + %5:gpr = LWU %3, 8 + $x10 = COPY %4 + $x11 = COPY %5 + PseudoRET +... 
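
All of the macro-fusion cases above follow the same shape: the first instruction of the candidate pair defines a virtual register, an unrelated XORI is placed between the two halves, and the load then uses that register as its base. A minimal sketch of the pattern, using placeholder register names rather than an additional test case:

    %base:gpr = ADD %1, %2
    %other:gpr = XORI %2, 3
    %val:gpr = LBU %base, 0

Here the ADD is the first half of the pair, the XORI is the unrelated instruction sitting between the pair, and the LBU consumes the ADD result as its base address. When the corresponding fusion is enabled, the machine-scheduler debug output is expected to contain a line of the form "Macro fuse: ... ADD - LBU", which is exactly what each pair of CHECK lines above matches; the intervening XORI appears to be there so that the fusion DAG mutation, rather than accidental adjacency, is what brings the two instructions together.
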
diff --git a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll index 160f0ae..abdc1ba 100644 --- a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll +++ b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll @@ -1,17 +1,42 @@ ; REQUIRES: asserts -; RUN: llc -mtriple=riscv32 -verify-misched -riscv-misched-load-store-clustering=false \ +; +; Disable all misched clustering +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=NOCLUSTER %s -; RUN: llc -mtriple=riscv64 -verify-misched -riscv-misched-load-store-clustering=false \ +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; +; ST misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; +; LD misched clustering only ; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=LDCLUSTER %s ; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ ; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ ; RUN: | FileCheck -check-prefix=LDCLUSTER %s - +; +; Default misched cluster settings (i.e. 
both LD and ST clustering) +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s define i32 @load_clustering_1(ptr nocapture %p) { ; NOCLUSTER: ********** MI Scheduling ********** @@ -22,6 +47,14 @@ define i32 @load_clustering_1(ptr nocapture %p) { ; NOCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 ; NOCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 ; +; STCLUSTER: ********** MI Scheduling ********** +; STCLUSTER-LABEL: load_clustering_1:%bb.0 +; STCLUSTER: *** Final schedule for %bb.0 *** +; STCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 +; STCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 +; STCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 +; STCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 +; ; LDCLUSTER: ********** MI Scheduling ********** ; LDCLUSTER-LABEL: load_clustering_1:%bb.0 ; LDCLUSTER: *** Final schedule for %bb.0 *** @@ -29,6 +62,14 @@ define i32 @load_clustering_1(ptr nocapture %p) { ; LDCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 ; LDCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 ; LDCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 +; +; DEFAULTCLUSTER: ********** MI Scheduling ********** +; DEFAULTCLUSTER-LABEL: load_clustering_1:%bb.0 +; DEFAULTCLUSTER: *** Final schedule for %bb.0 *** +; DEFAULTCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4 +; DEFAULTCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8 +; DEFAULTCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12 +; DEFAULTCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16 entry: %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3 %val0 = load i32, ptr %arrayidx0 diff --git a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir index 21398d3..01960f9 100644 --- a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir +++ b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir @@ -1,10 +1,12 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -verify-misched -enable-post-misched=false \ -# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \ +# RUN: -mattr=+disable-postmisched-load-clustering \ +# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \ # RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \ # RUN: | FileCheck -check-prefix=NOPOSTMISCHED %s # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \ -# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \ +# RUN: -mattr=+disable-postmisched-load-clustering \ +# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \ # RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \ # RUN: | FileCheck -check-prefix=NOCLUSTER %s # RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \ diff --git a/llvm/test/CodeGen/RISCV/misched-store-clustering.ll b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll new file mode 100644 index 0000000..02e853d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll @@ -0,0 +1,83 @@ +; REQUIRES: asserts +; +; Disable all misched clustering +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: 
-mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=NOCLUSTER %s +; +; ST misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-load-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=STCLUSTER %s +; +; LD misched clustering only +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=LDCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -mattr=+disable-misched-store-clustering \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=LDCLUSTER %s +; +; Default misched cluster settings (i.e. both LD and ST clustering) +; RUN: llc -mtriple=riscv32 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s +; RUN: llc -mtriple=riscv64 -verify-misched \ +; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \ +; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s + +define i32 @store_clustering_1(ptr nocapture %p, i32 %v) { +; NOCLUSTER: ********** MI Scheduling ********** +; NOCLUSTER-LABEL: store_clustering_1:%bb.0 +; NOCLUSTER: *** Final schedule for %bb.0 *** +; NOCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; NOCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; NOCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; NOCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; STCLUSTER: ********** MI Scheduling ********** +; STCLUSTER-LABEL: store_clustering_1:%bb.0 +; STCLUSTER: *** Final schedule for %bb.0 *** +; STCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; STCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; STCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; STCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; LDCLUSTER: ********** MI Scheduling ********** +; LDCLUSTER-LABEL: store_clustering_1:%bb.0 +; LDCLUSTER: *** Final schedule for %bb.0 *** +; LDCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; LDCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; LDCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; LDCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3) +; +; DEFAULTCLUSTER: ********** MI Scheduling ********** +; DEFAULTCLUSTER-LABEL: store_clustering_1:%bb.0 +; DEFAULTCLUSTER: *** Final schedule for %bb.0 *** +; DEFAULTCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2) +; DEFAULTCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1) +; DEFAULTCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0) +; DEFAULTCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: 
(store (s32) into %ir.arrayidx3) +entry: + %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3 + store i32 %v, ptr %arrayidx0 + %arrayidx1 = getelementptr inbounds i32, ptr %p, i32 2 + store i32 %v, ptr %arrayidx1 + %arrayidx2 = getelementptr inbounds i32, ptr %p, i32 1 + store i32 %v, ptr %arrayidx2 + %arrayidx3 = getelementptr inbounds i32, ptr %p, i32 4 + store i32 %v, ptr %arrayidx3 + ret i32 %v +} diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll index 4aa6dd4..42d326e 100644 --- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll @@ -319,3 +319,142 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind { %1 = zext i16 %a to i64 ret i64 %1 } + +define i32 @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3) nounwind { +; RV32I-LABEL: pack_lo_packh_hi_packh: +; RV32I: # %bb.0: +; RV32I-NEXT: slli a1, a1, 8 +; RV32I-NEXT: slli a2, a2, 16 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: or a2, a2, a3 +; RV32I-NEXT: or a0, a0, a2 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_packh_hi_packh: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a0, a0, a1 +; RV32ZBKB-NEXT: packh a1, a2, a3 +; RV32ZBKB-NEXT: pack a0, a0, a1 +; RV32ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %h, %f + %j = or i32 %i, %g + ret i32 %j +} + +define i32 @pack_lo_packh_hi_packh_2(i8 %0, i8 %1, i8 %2, i8 %3) nounwind { +; RV32I-LABEL: pack_lo_packh_hi_packh_2: +; RV32I: # %bb.0: +; RV32I-NEXT: zext.b a0, a0 +; RV32I-NEXT: zext.b a1, a1 +; RV32I-NEXT: zext.b a2, a2 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: slli a1, a1, 8 +; RV32I-NEXT: slli a2, a2, 16 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: or a2, a2, a3 +; RV32I-NEXT: or a0, a0, a2 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_packh_hi_packh_2: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a0, a0, a1 +; RV32ZBKB-NEXT: packh a1, a2, a3 +; RV32ZBKB-NEXT: pack a0, a0, a1 +; RV32ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %h, %f + %j = or i32 %i, %g + ret i32 %j +} + +define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2) nounwind { +; RV32I-LABEL: pack_lo_zext_hi_packh: +; RV32I: # %bb.0: +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: slli a2, a2, 24 +; RV32I-NEXT: or a1, a2, a1 +; RV32I-NEXT: or a0, a1, a0 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_zext_hi_packh: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a1, a1, a2 +; RV32ZBKB-NEXT: pack a0, a0, a1 +; RV32ZBKB-NEXT: ret + %a = zext i16 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + ret i32 %g +} + +; Negative test, %a isn't extended so we can't use pack for the outer or, but +; we can use packh for the high half. 
+define i32 @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2) nounwind { +; RV32I-LABEL: pack_lo_noext_hi_packh: +; RV32I: # %bb.0: +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: slli a2, a2, 24 +; RV32I-NEXT: or a1, a2, a1 +; RV32I-NEXT: or a0, a1, a0 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_noext_hi_packh: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a1, a1, a2 +; RV32ZBKB-NEXT: slli a1, a1, 16 +; RV32ZBKB-NEXT: or a0, a1, a0 +; RV32ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + ret i32 %g +} + +; Make sure we can match packh+slli without having the input bytes zero extended. +define i32 @pack_lo_noext_hi_packh_nozeroext(i32 %a, i8 %1, i8 %2) nounwind { +; RV32I-LABEL: pack_lo_noext_hi_packh_nozeroext: +; RV32I: # %bb.0: +; RV32I-NEXT: zext.b a1, a1 +; RV32I-NEXT: slli a2, a2, 24 +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: or a0, a2, a0 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: ret +; +; RV32ZBKB-LABEL: pack_lo_noext_hi_packh_nozeroext: +; RV32ZBKB: # %bb.0: +; RV32ZBKB-NEXT: packh a1, a1, a2 +; RV32ZBKB-NEXT: slli a1, a1, 16 +; RV32ZBKB-NEXT: or a0, a1, a0 +; RV32ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + ret i32 %g +} diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll index 57061e1..f89d1abf 100644 --- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll @@ -253,8 +253,8 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind { ; RV64IZHINX-NEXT: srli a1, a2, 1 ; RV64IZHINX-NEXT: .LBB4_4: ; RV64IZHINX-NEXT: feq.s a2, s0, s0 -; RV64IZHINX-NEXT: neg a3, a3 ; RV64IZHINX-NEXT: neg a4, s1 +; RV64IZHINX-NEXT: neg a3, a3 ; RV64IZHINX-NEXT: neg a2, a2 ; RV64IZHINX-NEXT: and a0, a4, a0 ; RV64IZHINX-NEXT: and a1, a2, a1 @@ -334,18 +334,19 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind { ; RV64IZHINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; RV64IZHINX-NEXT: fcvt.s.h a0, a0 -; RV64IZHINX-NEXT: lui a1, 522240 -; RV64IZHINX-NEXT: addi a1, a1, -1 -; RV64IZHINX-NEXT: fle.s a2, zero, a0 -; RV64IZHINX-NEXT: flt.s a1, a1, a0 -; RV64IZHINX-NEXT: neg s0, a1 -; RV64IZHINX-NEXT: neg s1, a2 +; RV64IZHINX-NEXT: fcvt.s.h s0, a0 +; RV64IZHINX-NEXT: fle.s a0, zero, s0 +; RV64IZHINX-NEXT: neg s1, a0 +; RV64IZHINX-NEXT: mv a0, s0 ; RV64IZHINX-NEXT: call __fixunssfti ; RV64IZHINX-NEXT: and a0, s1, a0 +; RV64IZHINX-NEXT: lui a2, 522240 ; RV64IZHINX-NEXT: and a1, s1, a1 -; RV64IZHINX-NEXT: or a0, s0, a0 -; RV64IZHINX-NEXT: or a1, s0, a1 +; RV64IZHINX-NEXT: addi a2, a2, -1 +; RV64IZHINX-NEXT: flt.s a2, a2, s0 +; RV64IZHINX-NEXT: neg a2, a2 +; RV64IZHINX-NEXT: or a0, a2, a0 +; RV64IZHINX-NEXT: or a1, a2, a1 ; RV64IZHINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64IZHINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64IZHINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll index 818ea72..f2c41db 100644 --- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll @@ -392,3 +392,217 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind { %1 = zext i16 %a to i64 ret i64 %1 } + +define void @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext 
%3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: slli a3, a3, 24 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a2, a2, a3 +; RV64I-NEXT: or a0, a0, a2 +; RV64I-NEXT: sw a0, 0(a4) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a2, a3 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a4) +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %h, %f + %j = or i32 %i, %g + store i32 %j, ptr %p + ret void +} + +define void @pack_lo_packh_hi_packh_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh_2: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: slli a3, a3, 24 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a2, a2, a3 +; RV64I-NEXT: or a0, a2, a0 +; RV64I-NEXT: sw a0, 0(a4) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_2: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a3, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a4) +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %g, %h + %j = or i32 %f, %i + store i32 %j, ptr %p + ret void +} + +define void @pack_lo_packh_hi_packh_3(i8 %0, i8 %1, i8 %2, i8 %3, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_packh_hi_packh_3: +; RV64I: # %bb.0: +; RV64I-NEXT: zext.b a0, a0 +; RV64I-NEXT: zext.b a1, a1 +; RV64I-NEXT: zext.b a2, a2 +; RV64I-NEXT: slli a3, a3, 24 +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: slli a2, a2, 16 +; RV64I-NEXT: or a0, a3, a0 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a0, a2, a0 +; RV64I-NEXT: sw a0, 0(a4) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_3: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a0, a0, a1 +; RV64ZBKB-NEXT: packh a1, a3, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a4) +; RV64ZBKB-NEXT: ret + %a = zext i8 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = zext i8 %3 to i32 + %e = shl i32 %b, 8 + %f = shl i32 %c, 16 + %g = shl i32 %d, 24 + %h = or i32 %a, %e + %i = or i32 %g, %h + %j = or i32 %f, %i + store i32 %j, ptr %p + ret void +} + +define void @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_zext_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 16 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: sw a0, 0(a3) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_zext_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a1, a2 +; RV64ZBKB-NEXT: packw a0, a0, a1 +; RV64ZBKB-NEXT: sw a0, 0(a3) +; RV64ZBKB-NEXT: ret + %a = zext i16 %0 to i32 + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + store i32 %g, ptr %p + ret void +} + +; Negative test, %a isn't extended so we can't use packw for the outer or, but +; we can use packh for the high half. 
+define void @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind { +; RV64I-LABEL: pack_lo_noext_hi_packh: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a1, a1, 16 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: sw a0, 0(a3) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_lo_noext_hi_packh: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a1, a2 +; RV64ZBKB-NEXT: slli a1, a1, 16 +; RV64ZBKB-NEXT: or a0, a1, a0 +; RV64ZBKB-NEXT: sw a0, 0(a3) +; RV64ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + store i32 %g, ptr %p + ret void +} + +; Make sure we can match packh+slli without having the input bytes zero extended. +define void @pack_i32_lo_noext_hi_packh_nozeroext(i32 %a, i8 %1, i8 %2, ptr %p) nounwind { +; RV64I-LABEL: pack_i32_lo_noext_hi_packh_nozeroext: +; RV64I: # %bb.0: +; RV64I-NEXT: zext.b a1, a1 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: slli a1, a1, 16 +; RV64I-NEXT: or a0, a2, a0 +; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: sw a0, 0(a3) +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_i32_lo_noext_hi_packh_nozeroext: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a1, a2 +; RV64ZBKB-NEXT: slli a1, a1, 16 +; RV64ZBKB-NEXT: or a0, a1, a0 +; RV64ZBKB-NEXT: sw a0, 0(a3) +; RV64ZBKB-NEXT: ret + %b = zext i8 %1 to i32 + %c = zext i8 %2 to i32 + %d = shl i32 %c, 8 + %e = or i32 %b, %d + %f = shl i32 %e, 16 + %g = or i32 %f, %a + store i32 %g, ptr %p + ret void +} + +; Make sure we can match packh+slli without having the input bytes zero extended. +define i64 @pack_i64_lo_noext_hi_packh_nozeroext(i64 %a, i8 %1, i8 %2, ptr %p) nounwind { +; RV64I-LABEL: pack_i64_lo_noext_hi_packh_nozeroext: +; RV64I: # %bb.0: +; RV64I-NEXT: zext.b a1, a1 +; RV64I-NEXT: zext.b a2, a2 +; RV64I-NEXT: slli a1, a1, 16 +; RV64I-NEXT: slli a2, a2, 24 +; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: ret +; +; RV64ZBKB-LABEL: pack_i64_lo_noext_hi_packh_nozeroext: +; RV64ZBKB: # %bb.0: +; RV64ZBKB-NEXT: packh a1, a1, a2 +; RV64ZBKB-NEXT: slli a1, a1, 16 +; RV64ZBKB-NEXT: or a0, a1, a0 +; RV64ZBKB-NEXT: ret + %b = zext i8 %1 to i64 + %c = zext i8 %2 to i64 + %d = shl i64 %c, 8 + %e = or i64 %b, %d + %f = shl i64 %e, 16 + %g = or i64 %f, %a + ret i64 %g +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll new file mode 100644 index 0000000..5b01976 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll @@ -0,0 +1,586 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define { <2 x i8>, i32 } @vploadff_v2i8(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i8>, i32 } @llvm.vp.load.ff.v2i8.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x i8>, i32 } %load +} + 
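+; The fault-only-first tests in this file all check the same lowering shape as
+; the case above: a vsetvli using the requested EVL, a vle{8,16,32,64}ff.v
+; (masked with v0.t, or unmasked for the splat-true mask variants), and a
+; "csrr ..., vl" read that returns how many elements were actually loaded as
+; the i32 member of the result struct. The <32 x i64> cases below differ only
+; in that the result is returned through memory, so the vl value is stored
+; next to the vector rather than left in a0.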
+define { <2 x i8>, i32 } @vploadff_v2i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i8>, i32 } @llvm.vp.load.ff.v2i8.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x i8>, i32 } %load +} + +define { <4 x i8>, i32 } @vploadff_v4i8(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i8>, i32 } @llvm.vp.load.ff.v4i8.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x i8>, i32 } %load +} + +define { <4 x i8>, i32 } @vploadff_v4i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i8>, i32 } @llvm.vp.load.ff.v4i8.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x i8>, i32 } %load +} + +define { <8 x i8>, i32 } @vploadff_v8i8(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i8>, i32 } @llvm.vp.load.ff.v8i8.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x i8>, i32 } %load +} + +define { <8 x i8>, i32 } @vploadff_v8i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i8>, i32 } @llvm.vp.load.ff.v8i8.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x i8>, i32 } %load +} + +define { <2 x i16>, i32 } @vploadff_v2i16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i16>, i32 } @llvm.vp.load.ff.v2i16.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x i16>, i32 } %load +} + +define { <2 x i16>, i32 } @vploadff_v2i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i16>, i32 } @llvm.vp.load.ff.v2i16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x i16>, i32 } %load +} + +define { <4 x i16>, i32 } @vploadff_v4i16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i16>, i32 } @llvm.vp.load.ff.v4i16.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x i16>, i32 } %load +} + +define { <4 x i16>, i32 } @vploadff_v4i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { 
<4 x i16>, i32 } @llvm.vp.load.ff.v4i16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x i16>, i32 } %load +} + +define { <8 x i16>, i32 } @vploadff_v8i16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i16>, i32 } @llvm.vp.load.ff.v8i16.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x i16>, i32 } %load +} + +define { <8 x i16>, i32 } @vploadff_v8i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i16>, i32 } @llvm.vp.load.ff.v8i16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x i16>, i32 } %load +} + +define { <2 x i32>, i32 } @vploadff_v2i32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i32>, i32 } @llvm.vp.load.ff.v2i32.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x i32>, i32 } %load +} + +define { <2 x i32>, i32 } @vploadff_v2i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i32>, i32 } @llvm.vp.load.ff.v2i32.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x i32>, i32 } %load +} + +define { <4 x i32>, i32 } @vploadff_v4i32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i32>, i32 } @llvm.vp.load.ff.v4i32.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x i32>, i32 } %load +} + +define { <4 x i32>, i32 } @vploadff_v4i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i32>, i32 } @llvm.vp.load.ff.v4i32.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x i32>, i32 } %load +} + +define { <8 x i32>, i32 } @vploadff_v8i32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i32>, i32 } @llvm.vp.load.ff.v8i32.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x i32>, i32 } %load +} + +define { <8 x i32>, i32 } @vploadff_v8i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i32>, i32 } @llvm.vp.load.ff.v8i32.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x i32>, i32 } %load +} + +define { <2 x i64>, i32 } @vploadff_v2i64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i64: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i64>, i32 } @llvm.vp.load.ff.v2i64.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x i64>, i32 } %load +} + +define { <2 x i64>, i32 } @vploadff_v2i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x i64>, i32 } @llvm.vp.load.ff.v2i64.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x i64>, i32 } %load +} + +define { <4 x i64>, i32 } @vploadff_v4i64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i64>, i32 } @llvm.vp.load.ff.v4i64.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x i64>, i32 } %load +} + +define { <4 x i64>, i32 } @vploadff_v4i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x i64>, i32 } @llvm.vp.load.ff.v4i64.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x i64>, i32 } %load +} + +define { <8 x i64>, i32 } @vploadff_v8i64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i64>, i32 } @llvm.vp.load.ff.v8i64.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x i64>, i32 } %load +} + +define { <8 x i64>, i32 } @vploadff_v8i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x i64>, i32 } @llvm.vp.load.ff.v8i64.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x i64>, i32 } %load +} + +define { <32 x i64>, i32 } @vploadff_v32i64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v32i64: +; CHECK: # %bb.0: +; CHECK-NEXT: li a3, 16 +; CHECK-NEXT: bltu a2, a3, .LBB24_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: .LBB24_2: +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a1), v0.t +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: sw a1, 256(a0) +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: ret + %load = call { <32 x i64>, i32 } @llvm.vp.load.ff.v32i64.p0(ptr %ptr, <32 x i1> %m, i32 %evl) + ret { <32 x i64>, i32 } %load +} + +define { <32 x i64>, i32 } @vploadff_v32i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v32i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: li a3, 16 +; CHECK-NEXT: bltu a2, a3, .LBB25_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: .LBB25_2: +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a1) +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: sw a1, 256(a0) +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: ret + %load = call { <32 x i64>, i32 } 
@llvm.vp.load.ff.v32i64.p0(ptr %ptr, <32 x i1> splat (i1 true), i32 %evl) + ret { <32 x i64>, i32 } %load +} + +define { <2 x half>, i32 } @vploadff_v2f16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x half>, i32 } @llvm.vp.load.ff.v2f16.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x half>, i32 } %load +} + +define { <2 x half>, i32 } @vploadff_v2f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x half>, i32 } @llvm.vp.load.ff.v2f16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x half>, i32 } %load +} + +define { <4 x half>, i32 } @vploadff_v4f16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x half>, i32 } @llvm.vp.load.ff.v4f16.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x half>, i32 } %load +} + +define { <4 x half>, i32 } @vploadff_v4f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x half>, i32 } @llvm.vp.load.ff.v4f16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x half>, i32 } %load +} + +define { <8 x half>, i32 } @vploadff_v8f16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x half>, i32 } @llvm.vp.load.ff.v8f16.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x half>, i32 } %load +} + +define { <8 x half>, i32 } @vploadff_v8f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x half>, i32 } @llvm.vp.load.ff.v8f16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x half>, i32 } %load +} + +define { <2 x float>, i32 } @vploadff_v2f32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x float>, i32 } @llvm.vp.load.ff.v2f32.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x float>, i32 } %load +} + +define { <2 x float>, i32 } @vploadff_v2f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x float>, i32 } @llvm.vp.load.ff.v2f32.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x float>, i32 } %load +} + +define { <4 x float>, i32 } @vploadff_v4f32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f32: 
+; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x float>, i32 } @llvm.vp.load.ff.v4f32.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x float>, i32 } %load +} + +define { <4 x float>, i32 } @vploadff_v4f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x float>, i32 } @llvm.vp.load.ff.v4f32.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x float>, i32 } %load +} + +define { <8 x float>, i32 } @vploadff_v8f32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x float>, i32 } @llvm.vp.load.ff.v8f32.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x float>, i32 } %load +} + +define { <8 x float>, i32 } @vploadff_v8f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x float>, i32 } @llvm.vp.load.ff.v8f32.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x float>, i32 } %load +} + +define { <2 x double>, i32 } @vploadff_v2f64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x double>, i32 } @llvm.vp.load.ff.v2f64.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x double>, i32 } %load +} + +define { <2 x double>, i32 } @vploadff_v2f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x double>, i32 } @llvm.vp.load.ff.v2f64.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x double>, i32 } %load +} + +define { <4 x double>, i32 } @vploadff_v4f64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x double>, i32 } @llvm.vp.load.ff.v4f64.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x double>, i32 } %load +} + +define { <4 x double>, i32 } @vploadff_v4f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x double>, i32 } @llvm.vp.load.ff.v4f64.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x double>, i32 } %load +} + +define { <8 x double>, i32 } @vploadff_v8f64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x double>, i32 } 
@llvm.vp.load.ff.v8f64.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x double>, i32 } %load +} + +define { <8 x double>, i32 } @vploadff_v8f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x double>, i32 } @llvm.vp.load.ff.v8f64.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x double>, i32 } %load +} + +define { <2 x bfloat>, i32 } @vploadff_v2bf16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x bfloat>, i32 } @llvm.vp.load.ff.v2bf16.p0(ptr %ptr, <2 x i1> %m, i32 %evl) + ret { <2 x bfloat>, i32 } %load +} + +define { <2 x bfloat>, i32 } @vploadff_v2bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v2bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <2 x bfloat>, i32 } @llvm.vp.load.ff.v2bf16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl) + ret { <2 x bfloat>, i32 } %load +} + +define { <4 x bfloat>, i32 } @vploadff_v4bf16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x bfloat>, i32 } @llvm.vp.load.ff.v4bf16.p0(ptr %ptr, <4 x i1> %m, i32 %evl) + ret { <4 x bfloat>, i32 } %load +} + +define { <4 x bfloat>, i32 } @vploadff_v4bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v4bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <4 x bfloat>, i32 } @llvm.vp.load.ff.v4bf16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl) + ret { <4 x bfloat>, i32 } %load +} + +define { <8 x bfloat>, i32 } @vploadff_v8bf16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x bfloat>, i32 } @llvm.vp.load.ff.v8bf16.p0(ptr %ptr, <8 x i1> %m, i32 %evl) + ret { <8 x bfloat>, i32 } %load +} + +define { <8 x bfloat>, i32 } @vploadff_v8bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v8bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <8 x bfloat>, i32 } @llvm.vp.load.ff.v8bf16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl) + ret { <8 x bfloat>, i32 } %load +} + +define { <7 x i8>, i32 } @vploadff_v7i8(ptr %ptr, <7 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_v7i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <7 x i8>, i32 } @llvm.vp.load.ff.v7i8.p0(ptr %ptr, <7 x i1> %m, i32 %evl) + ret { <7 x i8>, i32 } %load +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vploadff.ll 
b/llvm/test/CodeGen/RISCV/rvv/vploadff.ll new file mode 100644 index 0000000..9e08938 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vploadff.ll @@ -0,0 +1,1008 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +define { <vscale x 1 x i8>, i32 } @vploadff_nxv1i8(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i8>, i32 } @llvm.vp.load.ff.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x i8>, i32 } %load +} + +define { <vscale x 1 x i8>, i32 } @vploadff_nxv1i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i8>, i32 } @llvm.vp.load.ff.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x i8>, i32 } %load +} + +define { <vscale x 2 x i8>, i32 } @vploadff_nxv2i8(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i8>, i32 } @llvm.vp.load.ff.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x i8>, i32 } %load +} + +define { <vscale x 2 x i8>, i32 } @vploadff_nxv2i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i8>, i32 } @llvm.vp.load.ff.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x i8>, i32 } %load +} + +define { <vscale x 4 x i8>, i32 } @vploadff_nxv4i8(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i8>, i32 } @llvm.vp.load.ff.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x i8>, i32 } %load +} + +define { <vscale x 4 x i8>, i32 } @vploadff_nxv4i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i8>, i32 } @llvm.vp.load.ff.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x i8>, i32 } %load +} + +define { <vscale x 8 x i8>, i32 } @vploadff_nxv8i8(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i8: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i8>, i32 } @llvm.vp.load.ff.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x i8>, i32 } %load +} + +define { <vscale x 8 x i8>, i32 } @vploadff_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i8>, i32 } @llvm.vp.load.ff.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x i8>, i32 } %load +} + +define { <vscale x 16 x i8>, i32 } @vploadff_nxv16i8(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i8>, i32 } @llvm.vp.load.ff.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x i8>, i32 } %load +} + +define { <vscale x 16 x i8>, i32 } @vploadff_nxv16i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i8>, i32 } @llvm.vp.load.ff.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x i8>, i32 } %load +} + +define { <vscale x 32 x i8>, i32 } @vploadff_nxv32i8(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x i8>, i32 } @llvm.vp.load.ff.nxv32i8.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl) + ret { <vscale x 32 x i8>, i32 } %load +} + +define { <vscale x 32 x i8>, i32 } @vploadff_nxv32i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x i8>, i32 } @llvm.vp.load.ff.nxv32i8.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 32 x i8>, i32 } %load +} + +define { <vscale x 64 x i8>, i32 } @vploadff_nxv64i8(ptr %ptr, <vscale x 64 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 64 x i8>, i32 } @llvm.vp.load.ff.nxv64i8.p0(ptr %ptr, <vscale x 64 x i1> %m, i32 %evl) + ret { <vscale x 64 x i8>, i32 } %load +} + +define { <vscale x 64 x i8>, i32 } @vploadff_nxv64i8_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv64i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 64 x i8>, i32 } @llvm.vp.load.ff.nxv64i8.p0(ptr %ptr, <vscale x 64 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 64 x i8>, i32 } %load +} + +define <vscale x 128 x i8> @vploadff_nxv128i8(ptr %ptr, ptr %evl_out, 
<vscale x 128 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv128i8: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a3, vlenb +; CHECK-NEXT: slli a3, a3, 3 +; CHECK-NEXT: bltu a2, a3, .LBB14_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a2, a3 +; CHECK-NEXT: .LBB14_2: +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a1) +; CHECK-NEXT: ret + %load = call { <vscale x 128 x i8>, i32 } @llvm.vp.load.ff.nxv128i8.p0(ptr %ptr, <vscale x 128 x i1> %m, i32 %evl) + %result0 = extractvalue { <vscale x 128 x i8>, i32 } %load, 0 + %result1 = extractvalue { <vscale x 128 x i8>, i32 } %load, 1 + store i32 %result1, ptr %evl_out + ret <vscale x 128 x i8> %result0 +} + +define <vscale x 128 x i8> @vploadff_nxv128i8_allones_mask(ptr %ptr, ptr %evl_out, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv128i8_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a3, vlenb +; CHECK-NEXT: slli a3, a3, 3 +; CHECK-NEXT: bltu a2, a3, .LBB15_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a2, a3 +; CHECK-NEXT: .LBB15_2: +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: sw a0, 0(a1) +; CHECK-NEXT: ret + %load = call { <vscale x 128 x i8>, i32 } @llvm.vp.load.ff.nxv128i8.p0(ptr %ptr, <vscale x 128 x i1> splat (i1 true), i32 %evl) + %result0 = extractvalue { <vscale x 128 x i8>, i32 } %load, 0 + %result1 = extractvalue { <vscale x 128 x i8>, i32 } %load, 1 + store i32 %result1, ptr %evl_out + ret <vscale x 128 x i8> %result0 +} + +define { <vscale x 1 x i16>, i32 } @vploadff_nxv1i16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i16>, i32 } @llvm.vp.load.ff.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x i16>, i32 } %load +} + +define { <vscale x 1 x i16>, i32 } @vploadff_nxv1i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i16>, i32 } @llvm.vp.load.ff.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x i16>, i32 } %load +} + +define { <vscale x 2 x i16>, i32 } @vploadff_nxv2i16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i16>, i32 } @llvm.vp.load.ff.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x i16>, i32 } %load +} + +define { <vscale x 2 x i16>, i32 } @vploadff_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i16>, i32 } @llvm.vp.load.ff.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x i16>, i32 } %load +} + +define { <vscale x 4 x i16>, i32 } @vploadff_nxv4i16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i16: 
+; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i16>, i32 } @llvm.vp.load.ff.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x i16>, i32 } %load +} + +define { <vscale x 4 x i16>, i32 } @vploadff_nxv4i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i16>, i32 } @llvm.vp.load.ff.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x i16>, i32 } %load +} + +define { <vscale x 8 x i16>, i32 } @vploadff_nxv8i16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i16>, i32 } @llvm.vp.load.ff.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x i16>, i32 } %load +} + +define { <vscale x 8 x i16>, i32 } @vploadff_nxv8i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i16>, i32 } @llvm.vp.load.ff.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x i16>, i32 } %load +} + +define { <vscale x 16 x i16>, i32 } @vploadff_nxv16i16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i16>, i32 } @llvm.vp.load.ff.nxv16i16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x i16>, i32 } %load +} + +define { <vscale x 16 x i16>, i32 } @vploadff_nxv16i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i16>, i32 } @llvm.vp.load.ff.nxv16i16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x i16>, i32 } %load +} + +define { <vscale x 32 x i16>, i32 } @vploadff_nxv32i16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x i16>, i32 } @llvm.vp.load.ff.nxv32i16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl) + ret { <vscale x 32 x i16>, i32 } %load +} + +define { <vscale x 32 x i16>, i32 } @vploadff_nxv32i16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32i16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x i16>, i32 } @llvm.vp.load.ff.nxv32i16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 32 x i16>, i32 } %load +} + 
+define { <vscale x 1 x i32>, i32 } @vploadff_nxv1i32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i32>, i32 } @llvm.vp.load.ff.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x i32>, i32 } %load +} + +define { <vscale x 1 x i32>, i32 } @vploadff_nxv1i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i32>, i32 } @llvm.vp.load.ff.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x i32>, i32 } %load +} + +define { <vscale x 2 x i32>, i32 } @vploadff_nxv2i32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i32>, i32 } @llvm.vp.load.ff.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x i32>, i32 } %load +} + +define { <vscale x 2 x i32>, i32 } @vploadff_nxv2i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i32>, i32 } @llvm.vp.load.ff.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x i32>, i32 } %load +} + +define { <vscale x 4 x i32>, i32 } @vploadff_nxv4i32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i32>, i32 } @llvm.vp.load.ff.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x i32>, i32 } %load +} + +define { <vscale x 4 x i32>, i32 } @vploadff_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i32>, i32 } @llvm.vp.load.ff.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x i32>, i32 } %load +} + +define { <vscale x 8 x i32>, i32 } @vploadff_nxv8i32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i32>, i32 } @llvm.vp.load.ff.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x i32>, i32 } %load +} + +define { <vscale x 8 x i32>, i32 } @vploadff_nxv8i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i32>, i32 } 
@llvm.vp.load.ff.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x i32>, i32 } %load +} + +define { <vscale x 16 x i32>, i32 } @vploadff_nxv16i32(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i32>, i32 } @llvm.vp.load.ff.nxv16i32.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x i32>, i32 } %load +} + +define { <vscale x 16 x i32>, i32 } @vploadff_nxv16i32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16i32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x i32>, i32 } @llvm.vp.load.ff.nxv16i32.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x i32>, i32 } %load +} + +define { <vscale x 1 x i64>, i32 } @vploadff_nxv1i64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i64>, i32 } @llvm.vp.load.ff.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x i64>, i32 } %load +} + +define { <vscale x 1 x i64>, i32 } @vploadff_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x i64>, i32 } @llvm.vp.load.ff.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x i64>, i32 } %load +} + +define { <vscale x 2 x i64>, i32 } @vploadff_nxv2i64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i64>, i32 } @llvm.vp.load.ff.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x i64>, i32 } %load +} + +define { <vscale x 2 x i64>, i32 } @vploadff_nxv2i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x i64>, i32 } @llvm.vp.load.ff.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x i64>, i32 } %load +} + +define { <vscale x 4 x i64>, i32 } @vploadff_nxv4i64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i64>, i32 } @llvm.vp.load.ff.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x i64>, i32 } %load +} + +define { <vscale x 4 x i64>, i32 } @vploadff_nxv4i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; 
CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x i64>, i32 } @llvm.vp.load.ff.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x i64>, i32 } %load +} + +define { <vscale x 8 x i64>, i32 } @vploadff_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i64>, i32 } @llvm.vp.load.ff.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x i64>, i32 } %load +} + +define { <vscale x 8 x i64>, i32 } @vploadff_nxv8i64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8i64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x i64>, i32 } @llvm.vp.load.ff.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x i64>, i32 } %load +} + +define { <vscale x 1 x half>, i32 } @vploadff_nxv1f16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x half>, i32 } @llvm.vp.load.ff.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x half>, i32 } %load +} + +define { <vscale x 1 x half>, i32 } @vploadff_nxv1f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x half>, i32 } @llvm.vp.load.ff.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x half>, i32 } %load +} + +define { <vscale x 2 x half>, i32 } @vploadff_nxv2f16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x half>, i32 } @llvm.vp.load.ff.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x half>, i32 } %load +} + +define { <vscale x 2 x half>, i32 } @vploadff_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x half>, i32 } @llvm.vp.load.ff.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x half>, i32 } %load +} + +define { <vscale x 4 x half>, i32 } @vploadff_nxv4f16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x half>, i32 } @llvm.vp.load.ff.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x half>, i32 } %load +} + +define { <vscale x 4 x half>, i32 } @vploadff_nxv4f16_allones_mask(ptr %ptr, i32 zeroext 
%evl) { +; CHECK-LABEL: vploadff_nxv4f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x half>, i32 } @llvm.vp.load.ff.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x half>, i32 } %load +} + +define { <vscale x 8 x half>, i32 } @vploadff_nxv8f16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x half>, i32 } @llvm.vp.load.ff.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x half>, i32 } %load +} + +define { <vscale x 8 x half>, i32 } @vploadff_nxv8f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x half>, i32 } @llvm.vp.load.ff.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x half>, i32 } %load +} + +define { <vscale x 16 x half>, i32 } @vploadff_nxv16f16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x half>, i32 } @llvm.vp.load.ff.nxv16f16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x half>, i32 } %load +} + +define { <vscale x 16 x half>, i32 } @vploadff_nxv16f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x half>, i32 } @llvm.vp.load.ff.nxv16f16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x half>, i32 } %load +} + +define { <vscale x 32 x half>, i32 } @vploadff_nxv32f16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x half>, i32 } @llvm.vp.load.ff.nxv32f16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl) + ret { <vscale x 32 x half>, i32 } %load +} + +define { <vscale x 32 x half>, i32 } @vploadff_nxv32f16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32f16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x half>, i32 } @llvm.vp.load.ff.nxv32f16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 32 x half>, i32 } %load +} + +define { <vscale x 1 x float>, i32 } @vploadff_nxv1f32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x float>, i32 } @llvm.vp.load.ff.nxv1f32.p0(ptr %ptr, 
<vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x float>, i32 } %load +} + +define { <vscale x 1 x float>, i32 } @vploadff_nxv1f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x float>, i32 } @llvm.vp.load.ff.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x float>, i32 } %load +} + +define { <vscale x 2 x float>, i32 } @vploadff_nxv2f32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x float>, i32 } @llvm.vp.load.ff.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x float>, i32 } %load +} + +define { <vscale x 2 x float>, i32 } @vploadff_nxv2f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x float>, i32 } @llvm.vp.load.ff.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x float>, i32 } %load +} + +define { <vscale x 4 x float>, i32 } @vploadff_nxv4f32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x float>, i32 } @llvm.vp.load.ff.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x float>, i32 } %load +} + +define { <vscale x 4 x float>, i32 } @vploadff_nxv4f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x float>, i32 } @llvm.vp.load.ff.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x float>, i32 } %load +} + +define { <vscale x 8 x float>, i32 } @vploadff_nxv8f32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x float>, i32 } @llvm.vp.load.ff.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x float>, i32 } %load +} + +define { <vscale x 8 x float>, i32 } @vploadff_nxv8f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x float>, i32 } @llvm.vp.load.ff.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x float>, i32 } %load +} + +define { <vscale x 16 x float>, i32 } @vploadff_nxv16f32(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; 
CHECK-NEXT: vle32ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x float>, i32 } @llvm.vp.load.ff.nxv16f32.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x float>, i32 } %load +} + +define { <vscale x 16 x float>, i32 } @vploadff_nxv16f32_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16f32_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x float>, i32 } @llvm.vp.load.ff.nxv16f32.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x float>, i32 } %load +} + +define { <vscale x 1 x double>, i32 } @vploadff_nxv1f64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x double>, i32 } @llvm.vp.load.ff.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x double>, i32 } %load +} + +define { <vscale x 1 x double>, i32 } @vploadff_nxv1f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x double>, i32 } @llvm.vp.load.ff.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x double>, i32 } %load +} + +define { <vscale x 2 x double>, i32 } @vploadff_nxv2f64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x double>, i32 } @llvm.vp.load.ff.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x double>, i32 } %load +} + +define { <vscale x 2 x double>, i32 } @vploadff_nxv2f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x double>, i32 } @llvm.vp.load.ff.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x double>, i32 } %load +} + +define { <vscale x 4 x double>, i32 } @vploadff_nxv4f64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x double>, i32 } @llvm.vp.load.ff.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x double>, i32 } %load +} + +define { <vscale x 4 x double>, i32 } @vploadff_nxv4f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x double>, i32 } @llvm.vp.load.ff.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x double>, i32 } %load +} + +define { <vscale x 8 x 
double>, i32 } @vploadff_nxv8f64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x double>, i32 } @llvm.vp.load.ff.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x double>, i32 } %load +} + +define { <vscale x 8 x double>, i32 } @vploadff_nxv8f64_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8f64_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x double>, i32 } @llvm.vp.load.ff.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x double>, i32 } %load +} + +define { <vscale x 1 x bfloat>, i32 } @vploadff_nxv1bf16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x bfloat>, i32 } @llvm.vp.load.ff.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl) + ret { <vscale x 1 x bfloat>, i32 } %load +} + +define { <vscale x 1 x bfloat>, i32 } @vploadff_nxv1bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv1bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 1 x bfloat>, i32 } @llvm.vp.load.ff.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 1 x bfloat>, i32 } %load +} + +define { <vscale x 2 x bfloat>, i32 } @vploadff_nxv2bf16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x bfloat>, i32 } @llvm.vp.load.ff.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl) + ret { <vscale x 2 x bfloat>, i32 } %load +} + +define { <vscale x 2 x bfloat>, i32 } @vploadff_nxv2bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv2bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 2 x bfloat>, i32 } @llvm.vp.load.ff.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 2 x bfloat>, i32 } %load +} + +define { <vscale x 4 x bfloat>, i32 } @vploadff_nxv4bf16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 4 x bfloat>, i32 } @llvm.vp.load.ff.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl) + ret { <vscale x 4 x bfloat>, i32 } %load +} + +define { <vscale x 4 x bfloat>, i32 } @vploadff_nxv4bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv4bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + 
%load = call { <vscale x 4 x bfloat>, i32 } @llvm.vp.load.ff.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 4 x bfloat>, i32 } %load +} + +define { <vscale x 8 x bfloat>, i32 } @vploadff_nxv8bf16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x bfloat>, i32 } @llvm.vp.load.ff.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl) + ret { <vscale x 8 x bfloat>, i32 } %load +} + +define { <vscale x 8 x bfloat>, i32 } @vploadff_nxv8bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv8bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 8 x bfloat>, i32 } @llvm.vp.load.ff.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 8 x bfloat>, i32 } %load +} + +define { <vscale x 16 x bfloat>, i32 } @vploadff_nxv16bf16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x bfloat>, i32 } @llvm.vp.load.ff.nxv16bf16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl) + ret { <vscale x 16 x bfloat>, i32 } %load +} + +define { <vscale x 16 x bfloat>, i32 } @vploadff_nxv16bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv16bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 16 x bfloat>, i32 } @llvm.vp.load.ff.nxv16bf16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 16 x bfloat>, i32 } %load +} + +define { <vscale x 32 x bfloat>, i32 } @vploadff_nxv32bf16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x bfloat>, i32 } @llvm.vp.load.ff.nxv32bf16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl) + ret { <vscale x 32 x bfloat>, i32 } %load +} + +define { <vscale x 32 x bfloat>, i32 } @vploadff_nxv32bf16_allones_mask(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv32bf16_allones_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16ff.v v8, (a0) +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 32 x bfloat>, i32 } @llvm.vp.load.ff.nxv32bf16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl) + ret { <vscale x 32 x bfloat>, i32 } %load +} + +define { <vscale x 3 x i8>, i32 } @vploadff_nxv3i8(ptr %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vploadff_nxv3i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8ff.v v8, (a0), v0.t +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: ret + %load = call { <vscale x 3 x i8>, i32 } @llvm.vp.load.ff.nxv3i8.p0(ptr %ptr, <vscale x 3 x i1> %m, i32 %evl) + ret { <vscale x 3 x i8>, i32 } %load +} diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll 
b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll index c9c49e8..cb046cd 100644 --- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll +++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll @@ -204,18 +204,16 @@ define i64 @load_i64(ptr %p) { ; RV64IZBKB-NEXT: lbu a2, 5(a0) ; RV64IZBKB-NEXT: lbu a3, 6(a0) ; RV64IZBKB-NEXT: lbu a4, 7(a0) -; RV64IZBKB-NEXT: lbu a5, 0(a0) -; RV64IZBKB-NEXT: lbu a6, 1(a0) -; RV64IZBKB-NEXT: lbu a7, 2(a0) -; RV64IZBKB-NEXT: lbu a0, 3(a0) +; RV64IZBKB-NEXT: lbu a5, 1(a0) +; RV64IZBKB-NEXT: lbu a6, 2(a0) +; RV64IZBKB-NEXT: lbu a7, 3(a0) +; RV64IZBKB-NEXT: lbu a0, 0(a0) +; RV64IZBKB-NEXT: packh a3, a3, a4 ; RV64IZBKB-NEXT: packh a1, a1, a2 -; RV64IZBKB-NEXT: packh a2, a3, a4 -; RV64IZBKB-NEXT: packh a3, a5, a6 -; RV64IZBKB-NEXT: packh a0, a7, a0 -; RV64IZBKB-NEXT: slli a2, a2, 16 -; RV64IZBKB-NEXT: slli a0, a0, 16 -; RV64IZBKB-NEXT: or a1, a2, a1 -; RV64IZBKB-NEXT: or a0, a0, a3 +; RV64IZBKB-NEXT: packh a2, a6, a7 +; RV64IZBKB-NEXT: packh a0, a0, a5 +; RV64IZBKB-NEXT: packw a1, a1, a3 +; RV64IZBKB-NEXT: packw a0, a0, a2 ; RV64IZBKB-NEXT: pack a0, a0, a1 ; RV64IZBKB-NEXT: ret ; diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll new file mode 100644 index 0000000..00e9185 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll @@ -0,0 +1,75 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-vulkan1.3-library %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-vulkan1.3-library %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %} + +@.str = private unnamed_addr constant [2 x i8] c"b\00", align 1 +@.str.2 = private unnamed_addr constant [2 x i8] c"c\00", align 1 +@.str.4 = private unnamed_addr constant [2 x i8] c"d\00", align 1 +@.str.6 = private unnamed_addr constant [2 x i8] c"e\00", align 1 +@.str.8 = private unnamed_addr constant [2 x i8] c"f\00", align 1 +@.str.10 = private unnamed_addr constant [2 x i8] c"g\00", align 1 +@.str.12 = private unnamed_addr constant [2 x i8] c"h\00", align 1 +@.str.14 = private unnamed_addr constant [2 x i8] c"i\00", align 1 + +; CHECK-DAG: OpName [[b:%[0-9]+]] "b" +; CHECK-DAG: OpName [[c:%[0-9]+]] "c" +; CHECK-DAG: OpName [[d:%[0-9]+]] "d" +; CHECK-DAG: OpName [[e:%[0-9]+]] "e" +; CHECK-DAG: OpName [[f:%[0-9]+]] "f" +; CHECK-DAG: OpName [[g:%[0-9]+]] "g" +; CHECK-DAG: OpName [[h:%[0-9]+]] "h" +; CHECK-DAG: OpName [[i:%[0-9]+]] "i" +; CHECK-DAG: OpDecorate [[b]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[b]] Binding 1 +; CHECK-DAG: OpDecorate [[c]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[c]] Binding 0 +; CHECK-DAG: OpDecorate [[d]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[d]] Binding 3 +; CHECK-DAG: OpDecorate [[e]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[e]] Binding 2 +; CHECK-DAG: OpDecorate [[f]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[f]] Binding 1 +; CHECK-DAG: OpDecorate [[g]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[g]] Binding 0 +; CHECK-DAG: OpDecorate [[h]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[h]] Binding 3 +; CHECK-DAG: OpDecorate [[i]] DescriptorSet 10 +; CHECK-DAG: OpDecorate [[i]] Binding 2 + + +define void @main() local_unnamed_addr #0 { +entry: + %0 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str) + %1 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) 
@llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.2) + %2 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 1, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.4) + %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.6) + %4 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str.8) + %5 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 2, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.10) + %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.12) + %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.14) + %8 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0) + %9 = load i32, ptr addrspace(11) %8, align 4 + %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0) + %11 = load i32, ptr addrspace(11) %10, align 4 + %add.i = add nsw i32 %11, %9 + %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0) + %13 = load i32, ptr addrspace(11) %12, align 4 + %add4.i = add nsw i32 %add.i, %13 + %14 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0) + %15 = load i32, ptr addrspace(11) %14, align 4 + %add6.i = add nsw i32 %add4.i, %15 + %16 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0) + %17 = load i32, ptr addrspace(11) %16, align 4 + %add8.i = add nsw i32 %add6.i, %17 + %18 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0) + %19 = load i32, ptr addrspace(11) %18, align 4 + %add10.i = add nsw i32 %add8.i, %19 + %20 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0) + %21 = load i32, ptr addrspace(11) %20, align 4 + %add12.i = add nsw i32 %add10.i, %21 + %22 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0) + store 
i32 %add12.i, ptr addrspace(11) %22, align 4 + ret void +} + + +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
\ No newline at end of file diff --git a/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll b/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll index 5c5f704..6b07891 100644 --- a/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll +++ b/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll @@ -62,10 +62,11 @@ entry: define i32 @simplify_demanded_bits_drop_flag(i1 zeroext %x, i1 zeroext %y) nounwind { ; CHECK-LABEL: simplify_demanded_bits_drop_flag: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: negl %edi +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi ; CHECK-NEXT: shll $2, %esi -; CHECK-NEXT: xorl %edi, %esi -; CHECK-NEXT: movslq %esi, %rax +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: negq %rax +; CHECK-NEXT: xorq %rsi, %rax ; CHECK-NEXT: imulq $-1634202141, %rax, %rax # imm = 0x9E980DE3 ; CHECK-NEXT: movq %rax, %rcx ; CHECK-NEXT: shrq $63, %rcx diff --git a/llvm/test/CodeGen/X86/xray-custom-log.ll b/llvm/test/CodeGen/X86/xray-custom-log.ll index 8f23055..f4cdc23 100644 --- a/llvm/test/CodeGen/X86/xray-custom-log.ll +++ b/llvm/test/CodeGen/X86/xray-custom-log.ll @@ -1,9 +1,6 @@ ; RUN: llc -mtriple=x86_64 < %s | FileCheck %s ; RUN: llc -mtriple=x86_64 -relocation-model=pic < %s | FileCheck %s --check-prefix=PIC -; RUN: llc -mtriple=x86_64 -filetype=obj %s -o %t -; RUN: llvm-dwarfdump %t | FileCheck %s --check-prefix=DBG - define i32 @customevent() nounwind "function-instrument"="xray-always" !dbg !1 { %eventptr = alloca i8 %eventsize = alloca i64 @@ -93,17 +90,6 @@ define void @leaf_func() "function-instrument"="xray-always" "frame-pointer"="no declare void @llvm.xray.customevent(ptr, i64) declare void @llvm.xray.typedevent(i64, ptr, i64) -;; Construct call site entries for PATCHABLE_EVENT_CALL. -; DBG: DW_TAG_subprogram -; DBG: DW_TAG_call_site -; DBG-NEXT: DW_AT_call_target (DW_OP_reg{{.*}}) -; DBG-NEXT: DW_AT_call_return_pc - -; DBG: DW_TAG_subprogram -; DBG: DW_TAG_call_site -; DBG-NEXT: DW_AT_call_target (DW_OP_reg{{.*}}) -; DBG-NEXT: DW_AT_call_return_pc - !llvm.dbg.cu = !{!7} !llvm.module.flags = !{!10, !11} diff --git a/llvm/test/CodeGen/Xtensa/atomic-load-store.ll b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll new file mode 100644 index 0000000..bd843a3 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll @@ -0,0 +1,498 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,+s32c1i < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @atomic_load_i8_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a unordered, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; 
XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a monotonic, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a acquire, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i8, ptr %a seq_cst, align 1 + ret i8 %1 +} + +define i16 @atomic_load_i16_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a unordered, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a monotonic, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i16, ptr %a acquire, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load 
atomic i16, ptr %a seq_cst, align 2 + ret i16 %1 +} + +define i32 @atomic_load_i32_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a unordered, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a monotonic, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a acquire, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %1 = load atomic i32, ptr %a seq_cst, align 4 + ret i32 %1 +} + +define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a unordered, align 1 + ret void +} + +define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a monotonic, align 1 + ret void +} + +define 
void @atomic_store_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a release, align 1 + ret void +} + +define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 %b, ptr %a seq_cst, align 1 + ret void +} + +define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a unordered, align 2 + ret void +} + +define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a monotonic, align 2 + ret void +} + +define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 %b, ptr %a release, align 2 + ret void +} + +define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + 
store atomic i16 %b, ptr %a seq_cst, align 2 + ret void +} + +define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a unordered, align 4 + ret void +} + +define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a monotonic, align 4 + ret void +} + +define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a release, align 4 + ret void +} + +define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 %b, ptr %a seq_cst, align 4 + ret void +} diff --git a/llvm/test/CodeGen/Xtensa/atomic-rmw.ll b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll new file mode 100644 index 0000000..81cb2dd --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll @@ -0,0 +1,10298 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: 
slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB0_2 +; XTENSA-ATOMIC-NEXT: .LBB0_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB0_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB0_4 +; XTENSA-ATOMIC-NEXT: .LBB0_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB0_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB0_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB0_1 +; XTENSA-ATOMIC-NEXT: .LBB0_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB1_2 +; XTENSA-ATOMIC-NEXT: .LBB1_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB1_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB1_4 +; XTENSA-ATOMIC-NEXT: .LBB1_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB1_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB1_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB1_1 +; XTENSA-ATOMIC-NEXT: .LBB1_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) 
nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB2_2 +; XTENSA-ATOMIC-NEXT: .LBB2_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB2_4 +; XTENSA-ATOMIC-NEXT: .LBB2_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB2_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB2_1 +; XTENSA-ATOMIC-NEXT: .LBB2_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB3_2 +; XTENSA-ATOMIC-NEXT: .LBB3_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB3_4 +; XTENSA-ATOMIC-NEXT: .LBB3_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; 
XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB3_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB3_1 +; XTENSA-ATOMIC-NEXT: .LBB3_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB4_2 +; XTENSA-ATOMIC-NEXT: .LBB4_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB4_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB4_4 +; XTENSA-ATOMIC-NEXT: .LBB4_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB4_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB4_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB4_1 +; XTENSA-ATOMIC-NEXT: .LBB4_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and 
a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB5_2 +; XTENSA-ATOMIC-NEXT: .LBB5_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB5_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB5_4 +; XTENSA-ATOMIC-NEXT: .LBB5_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB5_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB5_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB5_1 +; XTENSA-ATOMIC-NEXT: .LBB5_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB6_2 +; XTENSA-ATOMIC-NEXT: .LBB6_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB6_4 +; XTENSA-ATOMIC-NEXT: .LBB6_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB6_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB6_1 +; XTENSA-ATOMIC-NEXT: .LBB6_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, 
.LCPI7_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB7_2 +; XTENSA-ATOMIC-NEXT: .LBB7_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB7_4 +; XTENSA-ATOMIC-NEXT: .LBB7_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB7_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB7_1 +; XTENSA-ATOMIC-NEXT: .LBB7_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB8_2 +; XTENSA-ATOMIC-NEXT: .LBB8_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB8_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB8_4 +; XTENSA-ATOMIC-NEXT: .LBB8_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; 
XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB8_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB8_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB8_1 +; XTENSA-ATOMIC-NEXT: .LBB8_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB9_2 +; XTENSA-ATOMIC-NEXT: .LBB9_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB9_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB9_4 +; XTENSA-ATOMIC-NEXT: .LBB9_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB9_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB9_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB9_1 +; XTENSA-ATOMIC-NEXT: .LBB9_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; 
XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB10_2 +; XTENSA-ATOMIC-NEXT: .LBB10_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB10_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB10_4 +; XTENSA-ATOMIC-NEXT: .LBB10_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB10_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB10_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB10_1 +; XTENSA-ATOMIC-NEXT: .LBB10_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB11_2 +; XTENSA-ATOMIC-NEXT: .LBB11_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB11_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB11_4 +; XTENSA-ATOMIC-NEXT: .LBB11_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB11_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB11_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB11_1 +; XTENSA-ATOMIC-NEXT: .LBB11_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, 
.LCPI12_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB12_2 +; XTENSA-ATOMIC-NEXT: .LBB12_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB12_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB12_4 +; XTENSA-ATOMIC-NEXT: .LBB12_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB12_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB12_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB12_1 +; XTENSA-ATOMIC-NEXT: .LBB12_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB13_2 +; XTENSA-ATOMIC-NEXT: .LBB13_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB13_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB13_4 +; XTENSA-ATOMIC-NEXT: .LBB13_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB13_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB13_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB13_1 +; XTENSA-ATOMIC-NEXT: .LBB13_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB14_2 +; XTENSA-ATOMIC-NEXT: .LBB14_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB14_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB14_4 +; XTENSA-ATOMIC-NEXT: .LBB14_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB14_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB14_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB14_1 +; XTENSA-ATOMIC-NEXT: .LBB14_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi 
a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB15_2 +; XTENSA-ATOMIC-NEXT: .LBB15_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB15_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB15_4 +; XTENSA-ATOMIC-NEXT: .LBB15_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB15_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB15_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB15_1 +; XTENSA-ATOMIC-NEXT: .LBB15_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB16_2 +; XTENSA-ATOMIC-NEXT: .LBB16_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB16_4 +; XTENSA-ATOMIC-NEXT: .LBB16_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB16_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB16_1 +; XTENSA-ATOMIC-NEXT: .LBB16_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_and_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB17_2 +; XTENSA-ATOMIC-NEXT: .LBB17_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB17_4 +; XTENSA-ATOMIC-NEXT: .LBB17_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB17_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB17_1 +; XTENSA-ATOMIC-NEXT: .LBB17_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB18_2 +; XTENSA-ATOMIC-NEXT: .LBB18_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB18_4 +; XTENSA-ATOMIC-NEXT: .LBB18_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB18_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB18_1 +; XTENSA-ATOMIC-NEXT: .LBB18_4: # 
%atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB19_2 +; XTENSA-ATOMIC-NEXT: .LBB19_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB19_4 +; XTENSA-ATOMIC-NEXT: .LBB19_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB19_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB19_1 +; XTENSA-ATOMIC-NEXT: .LBB19_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB20_2 +; XTENSA-ATOMIC-NEXT: .LBB20_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: 
beqi a5, 1, .LBB20_4 +; XTENSA-ATOMIC-NEXT: .LBB20_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB20_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB20_1 +; XTENSA-ATOMIC-NEXT: .LBB20_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB21_2 +; XTENSA-ATOMIC-NEXT: .LBB21_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB21_4 +; XTENSA-ATOMIC-NEXT: .LBB21_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB21_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB21_1 +; XTENSA-ATOMIC-NEXT: .LBB21_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 
255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB22_2 +; XTENSA-ATOMIC-NEXT: .LBB22_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB22_4 +; XTENSA-ATOMIC-NEXT: .LBB22_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB22_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB22_1 +; XTENSA-ATOMIC-NEXT: .LBB22_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB23_2 +; XTENSA-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB23_4 +; XTENSA-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB23_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB23_1 +; XTENSA-ATOMIC-NEXT: .LBB23_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI24_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB24_2 +; XTENSA-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB24_4 +; XTENSA-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB24_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB24_1 +; XTENSA-ATOMIC-NEXT: .LBB24_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI25_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB25_2 +; XTENSA-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB25_4 +; XTENSA-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB25_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB25_1 +; XTENSA-ATOMIC-NEXT: .LBB25_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI26_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB26_2 +; XTENSA-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB26_4 +; XTENSA-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB26_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB26_1 +; XTENSA-ATOMIC-NEXT: .LBB26_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI27_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: 
movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB27_2 +; XTENSA-ATOMIC-NEXT: .LBB27_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB27_4 +; XTENSA-ATOMIC-NEXT: .LBB27_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB27_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB27_1 +; XTENSA-ATOMIC-NEXT: .LBB27_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI28_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB28_2 +; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB28_4 +; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB28_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB28_1 +; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI29_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; 
XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB29_2 +; XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB29_4 +; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB29_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB29_1 +; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI30_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB30_2 +; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB30_4 +; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB30_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB30_1 +; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI31_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: 
entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB31_2 +; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB31_4 +; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB31_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB31_1 +; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI32_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB32_2 +; XTENSA-ATOMIC-NEXT: .LBB32_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB32_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB32_4 +; XTENSA-ATOMIC-NEXT: .LBB32_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB32_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB32_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB32_1 +; XTENSA-ATOMIC-NEXT: .LBB32_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: 
l32r a8, .LCPI33_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB33_2 +; XTENSA-ATOMIC-NEXT: .LBB33_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB33_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB33_4 +; XTENSA-ATOMIC-NEXT: .LBB33_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB33_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB33_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB33_1 +; XTENSA-ATOMIC-NEXT: .LBB33_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI34_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB34_2 +; XTENSA-ATOMIC-NEXT: .LBB34_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB34_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB34_4 +; XTENSA-ATOMIC-NEXT: .LBB34_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB34_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB34_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB34_1 +; XTENSA-ATOMIC-NEXT: .LBB34_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i8 %b seq_cst + ret i8 %res +} + +define 
i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI35_0 +; XTENSA-NEXT: j .LBB35_2 +; XTENSA-NEXT: .LBB35_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB35_4 +; XTENSA-NEXT: .LBB35_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB35_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB35_1 +; XTENSA-NEXT: .LBB35_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB35_2 +; XTENSA-ATOMIC-NEXT: .LBB35_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB35_6 +; XTENSA-ATOMIC-NEXT: .LBB35_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB35_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB35_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB35_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB35_1 +; XTENSA-ATOMIC-NEXT: .LBB35_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 
+; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI36_0 +; XTENSA-NEXT: j .LBB36_2 +; XTENSA-NEXT: .LBB36_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB36_4 +; XTENSA-NEXT: .LBB36_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB36_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB36_1 +; XTENSA-NEXT: .LBB36_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB36_2 +; XTENSA-ATOMIC-NEXT: .LBB36_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB36_6 +; XTENSA-ATOMIC-NEXT: .LBB36_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB36_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB36_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB36_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB36_1 +; XTENSA-ATOMIC-NEXT: .LBB36_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; 
XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI37_0 +; XTENSA-NEXT: j .LBB37_2 +; XTENSA-NEXT: .LBB37_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB37_4 +; XTENSA-NEXT: .LBB37_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB37_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB37_1 +; XTENSA-NEXT: .LBB37_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB37_2 +; XTENSA-ATOMIC-NEXT: .LBB37_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB37_6 +; XTENSA-ATOMIC-NEXT: .LBB37_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB37_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB37_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB37_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB37_1 +; XTENSA-ATOMIC-NEXT: .LBB37_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, 
a8, 24 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI38_0 +; XTENSA-NEXT: j .LBB38_2 +; XTENSA-NEXT: .LBB38_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB38_4 +; XTENSA-NEXT: .LBB38_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB38_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB38_1 +; XTENSA-NEXT: .LBB38_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB38_2 +; XTENSA-ATOMIC-NEXT: .LBB38_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB38_6 +; XTENSA-ATOMIC-NEXT: .LBB38_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB38_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB38_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB38_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB38_1 +; XTENSA-ATOMIC-NEXT: .LBB38_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI39_0 +; 
XTENSA-NEXT: j .LBB39_2 +; XTENSA-NEXT: .LBB39_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB39_4 +; XTENSA-NEXT: .LBB39_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB39_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB39_1 +; XTENSA-NEXT: .LBB39_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB39_2 +; XTENSA-ATOMIC-NEXT: .LBB39_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB39_6 +; XTENSA-ATOMIC-NEXT: .LBB39_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB39_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB39_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB39_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB39_1 +; XTENSA-ATOMIC-NEXT: .LBB39_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI40_0 +; XTENSA-NEXT: j .LBB40_2 +; XTENSA-NEXT: .LBB40_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB40_2 Depth=1 +; 
XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB40_4 +; XTENSA-NEXT: .LBB40_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB40_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB40_1 +; XTENSA-NEXT: .LBB40_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB40_2 +; XTENSA-ATOMIC-NEXT: .LBB40_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB40_6 +; XTENSA-ATOMIC-NEXT: .LBB40_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB40_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB40_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB40_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB40_1 +; XTENSA-ATOMIC-NEXT: .LBB40_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI41_0 +; XTENSA-NEXT: j .LBB41_2 +; XTENSA-NEXT: .LBB41_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 
+; XTENSA-NEXT: bnez a10, .LBB41_4 +; XTENSA-NEXT: .LBB41_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB41_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB41_1 +; XTENSA-NEXT: .LBB41_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB41_2 +; XTENSA-ATOMIC-NEXT: .LBB41_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB41_6 +; XTENSA-ATOMIC-NEXT: .LBB41_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB41_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB41_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB41_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB41_1 +; XTENSA-ATOMIC-NEXT: .LBB41_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI42_0 +; XTENSA-NEXT: j .LBB42_2 +; XTENSA-NEXT: .LBB42_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB42_4 +; 
XTENSA-NEXT: .LBB42_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB42_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB42_1 +; XTENSA-NEXT: .LBB42_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB42_2 +; XTENSA-ATOMIC-NEXT: .LBB42_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB42_6 +; XTENSA-ATOMIC-NEXT: .LBB42_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB42_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB42_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB42_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB42_1 +; XTENSA-ATOMIC-NEXT: .LBB42_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l8ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 24 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI43_0 +; XTENSA-NEXT: j .LBB43_2 +; XTENSA-NEXT: .LBB43_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB43_4 +; XTENSA-NEXT: .LBB43_2: 
# %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB43_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB43_1 +; XTENSA-NEXT: .LBB43_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB43_2 +; XTENSA-ATOMIC-NEXT: .LBB43_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB43_6 +; XTENSA-ATOMIC-NEXT: .LBB43_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB43_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB43_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB43_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB43_1 +; XTENSA-ATOMIC-NEXT: .LBB43_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l8ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a5, a8, 24 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI44_0 +; XTENSA-NEXT: j .LBB44_2 +; XTENSA-NEXT: .LBB44_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB44_4 +; XTENSA-NEXT: .LBB44_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 0 +; 
XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB44_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB44_1 +; XTENSA-NEXT: .LBB44_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: slli a12, a3, 24 +; XTENSA-ATOMIC-NEXT: srai a12, a12, 24 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB44_2 +; XTENSA-ATOMIC-NEXT: .LBB44_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB44_6 +; XTENSA-ATOMIC-NEXT: .LBB44_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: slli a6, a7, 24 +; XTENSA-ATOMIC-NEXT: srai a5, a6, 24 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB44_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB44_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB44_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB44_1 +; XTENSA-ATOMIC-NEXT: .LBB44_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a6, .LCPI45_0 +; XTENSA-NEXT: j .LBB45_2 +; XTENSA-NEXT: .LBB45_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB45_4 +; XTENSA-NEXT: .LBB45_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or 
a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB45_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB45_1 +; XTENSA-NEXT: .LBB45_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB45_2 +; XTENSA-ATOMIC-NEXT: .LBB45_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB45_6 +; XTENSA-ATOMIC-NEXT: .LBB45_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB45_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB45_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB45_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB45_1 +; XTENSA-ATOMIC-NEXT: .LBB45_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a6, .LCPI46_0 +; XTENSA-NEXT: j .LBB46_2 +; XTENSA-NEXT: .LBB46_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB46_4 +; XTENSA-NEXT: .LBB46_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB46_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB46_1 +; XTENSA-NEXT: 
.LBB46_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB46_2 +; XTENSA-ATOMIC-NEXT: .LBB46_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB46_6 +; XTENSA-ATOMIC-NEXT: .LBB46_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB46_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB46_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB46_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB46_1 +; XTENSA-ATOMIC-NEXT: .LBB46_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a3, .LCPI47_0 +; XTENSA-NEXT: j .LBB47_2 +; XTENSA-NEXT: .LBB47_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB47_4 +; XTENSA-NEXT: .LBB47_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bgeu a9, a8, .LBB47_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB47_1 +; XTENSA-NEXT: .LBB47_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; 
XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB47_2 +; XTENSA-ATOMIC-NEXT: .LBB47_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB47_6 +; XTENSA-ATOMIC-NEXT: .LBB47_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB47_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB47_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB47_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB47_1 +; XTENSA-ATOMIC-NEXT: .LBB47_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a3, .LCPI48_0 +; XTENSA-NEXT: j .LBB48_2 +; XTENSA-NEXT: .LBB48_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB48_4 +; XTENSA-NEXT: .LBB48_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bgeu a9, a8, .LBB48_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB48_1 +; XTENSA-NEXT: .LBB48_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_acq_rel: +; 
XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB48_2 +; XTENSA-ATOMIC-NEXT: .LBB48_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB48_6 +; XTENSA-ATOMIC-NEXT: .LBB48_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB48_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB48_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB48_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB48_1 +; XTENSA-ATOMIC-NEXT: .LBB48_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a6, .LCPI49_0 +; XTENSA-NEXT: j .LBB49_2 +; XTENSA-NEXT: .LBB49_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB49_4 +; XTENSA-NEXT: .LBB49_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a4, a8, .LBB49_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB49_1 +; XTENSA-NEXT: .LBB49_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, 
a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB49_2 +; XTENSA-ATOMIC-NEXT: .LBB49_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB49_6 +; XTENSA-ATOMIC-NEXT: .LBB49_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB49_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB49_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB49_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB49_1 +; XTENSA-ATOMIC-NEXT: .LBB49_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a6, .LCPI50_0 +; XTENSA-NEXT: j .LBB50_2 +; XTENSA-NEXT: .LBB50_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB50_4 +; XTENSA-NEXT: .LBB50_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB50_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB50_1 +; XTENSA-NEXT: .LBB50_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; 
XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB50_2 +; XTENSA-ATOMIC-NEXT: .LBB50_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB50_6 +; XTENSA-ATOMIC-NEXT: .LBB50_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB50_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB50_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB50_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB50_1 +; XTENSA-ATOMIC-NEXT: .LBB50_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b monotonic + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a6, .LCPI51_0 +; XTENSA-NEXT: j .LBB51_2 +; XTENSA-NEXT: .LBB51_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB51_4 +; XTENSA-NEXT: .LBB51_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB51_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB51_1 +; XTENSA-NEXT: .LBB51_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: 
movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB51_2 +; XTENSA-ATOMIC-NEXT: .LBB51_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB51_6 +; XTENSA-ATOMIC-NEXT: .LBB51_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB51_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB51_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB51_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB51_1 +; XTENSA-ATOMIC-NEXT: .LBB51_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b acquire + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a3, .LCPI52_0 +; XTENSA-NEXT: j .LBB52_2 +; XTENSA-NEXT: .LBB52_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB52_4 +; XTENSA-NEXT: .LBB52_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bltu a9, a8, .LBB52_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB52_1 +; XTENSA-NEXT: .LBB52_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; 
XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB52_2 +; XTENSA-ATOMIC-NEXT: .LBB52_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB52_6 +; XTENSA-ATOMIC-NEXT: .LBB52_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB52_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB52_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB52_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB52_1 +; XTENSA-ATOMIC-NEXT: .LBB52_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b release + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a4, 255 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: and a8, a3, a4 +; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a3, .LCPI53_0 +; XTENSA-NEXT: j .LBB53_2 +; XTENSA-NEXT: .LBB53_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 8 +; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a3 +; XTENSA-NEXT: l8ui a2, a1, 8 +; XTENSA-NEXT: bnez a10, .LBB53_4 +; XTENSA-NEXT: .LBB53_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 8 +; XTENSA-NEXT: and a8, a2, a4 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bltu a9, a8, .LBB53_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB53_1 +; XTENSA-NEXT: .LBB53_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j 
.LBB53_2 +; XTENSA-ATOMIC-NEXT: .LBB53_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB53_6 +; XTENSA-ATOMIC-NEXT: .LBB53_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB53_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB53_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB53_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB53_1 +; XTENSA-ATOMIC-NEXT: .LBB53_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b acq_rel + ret i8 %res +} + +define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i8_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a8, a3, a3 +; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: movi a5, 255 +; XTENSA-NEXT: and a4, a8, a5 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a6, .LCPI54_0 +; XTENSA-NEXT: j .LBB54_2 +; XTENSA-NEXT: .LBB54_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: l8ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB54_4 +; XTENSA-NEXT: .LBB54_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s8i a2, a1, 4 +; XTENSA-NEXT: and a8, a2, a5 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a4, a8, .LBB54_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB54_1 +; XTENSA-NEXT: .LBB54_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: and a12, a3, a9 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB54_2 +; XTENSA-ATOMIC-NEXT: .LBB54_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; 
XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB54_6 +; XTENSA-ATOMIC-NEXT: .LBB54_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a7, a15 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: or a6, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB54_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a7, a7 +; XTENSA-ATOMIC-NEXT: .LBB54_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a6, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a7, a7 +; XTENSA-ATOMIC-NEXT: and a6, a15, a10 +; XTENSA-ATOMIC-NEXT: or a7, a6, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB54_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB54_1 +; XTENSA-ATOMIC-NEXT: .LBB54_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i8 %b seq_cst + ret i8 %res +} + +define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI55_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI55_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB55_2 +; XTENSA-ATOMIC-NEXT: .LBB55_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB55_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB55_4 +; XTENSA-ATOMIC-NEXT: .LBB55_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB55_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB55_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB55_1 +; XTENSA-ATOMIC-NEXT: .LBB55_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: 
entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI56_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI56_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB56_2 +; XTENSA-ATOMIC-NEXT: .LBB56_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB56_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB56_4 +; XTENSA-ATOMIC-NEXT: .LBB56_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB56_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB56_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB56_1 +; XTENSA-ATOMIC-NEXT: .LBB56_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI57_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI57_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB57_2 +; XTENSA-ATOMIC-NEXT: .LBB57_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB57_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB57_4 +; XTENSA-ATOMIC-NEXT: .LBB57_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or 
a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB57_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB57_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB57_1 +; XTENSA-ATOMIC-NEXT: .LBB57_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI58_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI58_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB58_2 +; XTENSA-ATOMIC-NEXT: .LBB58_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB58_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB58_4 +; XTENSA-ATOMIC-NEXT: .LBB58_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB58_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB58_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB58_1 +; XTENSA-ATOMIC-NEXT: .LBB58_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI59_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI59_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a10, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a11, -4 +; XTENSA-ATOMIC-NEXT: and a11, a2, a11 +; XTENSA-ATOMIC-NEXT: memw +; 
XTENSA-ATOMIC-NEXT: l32i a15, a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB59_2 +; XTENSA-ATOMIC-NEXT: .LBB59_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB59_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB59_4 +; XTENSA-ATOMIC-NEXT: .LBB59_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a15, a10 +; XTENSA-ATOMIC-NEXT: or a14, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB59_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB59_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB59_1 +; XTENSA-ATOMIC-NEXT: .LBB59_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI60_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI60_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB60_2 +; XTENSA-ATOMIC-NEXT: .LBB60_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB60_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB60_4 +; XTENSA-ATOMIC-NEXT: .LBB60_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB60_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB60_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB60_1 +; XTENSA-ATOMIC-NEXT: .LBB60_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI61_0 +; XTENSA-NEXT: callx8 a8 +; 
XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI61_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB61_2 +; XTENSA-ATOMIC-NEXT: .LBB61_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB61_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB61_4 +; XTENSA-ATOMIC-NEXT: .LBB61_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB61_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB61_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB61_1 +; XTENSA-ATOMIC-NEXT: .LBB61_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI62_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI62_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB62_2 +; XTENSA-ATOMIC-NEXT: .LBB62_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB62_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB62_4 +; XTENSA-ATOMIC-NEXT: .LBB62_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; 
XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB62_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB62_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB62_1 +; XTENSA-ATOMIC-NEXT: .LBB62_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI63_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI63_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB63_2 +; XTENSA-ATOMIC-NEXT: .LBB63_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB63_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB63_4 +; XTENSA-ATOMIC-NEXT: .LBB63_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB63_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB63_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB63_1 +; XTENSA-ATOMIC-NEXT: .LBB63_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI64_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI64_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, 
a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB64_2 +; XTENSA-ATOMIC-NEXT: .LBB64_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB64_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB64_4 +; XTENSA-ATOMIC-NEXT: .LBB64_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: add a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB64_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB64_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB64_1 +; XTENSA-ATOMIC-NEXT: .LBB64_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI65_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI65_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB65_2 +; XTENSA-ATOMIC-NEXT: .LBB65_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB65_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB65_4 +; XTENSA-ATOMIC-NEXT: .LBB65_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB65_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB65_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB65_1 +; XTENSA-ATOMIC-NEXT: .LBB65_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, 
a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI66_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI66_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB66_2 +; XTENSA-ATOMIC-NEXT: .LBB66_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB66_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB66_4 +; XTENSA-ATOMIC-NEXT: .LBB66_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB66_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB66_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB66_1 +; XTENSA-ATOMIC-NEXT: .LBB66_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI67_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI67_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB67_2 +; XTENSA-ATOMIC-NEXT: .LBB67_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB67_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB67_4 +; XTENSA-ATOMIC-NEXT: .LBB67_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; 
XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB67_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB67_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB67_1 +; XTENSA-ATOMIC-NEXT: .LBB67_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI68_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI68_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB68_2 +; XTENSA-ATOMIC-NEXT: .LBB68_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB68_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB68_4 +; XTENSA-ATOMIC-NEXT: .LBB68_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB68_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB68_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB68_1 +; XTENSA-ATOMIC-NEXT: .LBB68_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI69_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI69_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a11, 
a10, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -4 +; XTENSA-ATOMIC-NEXT: and a12, a2, a12 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 1 +; XTENSA-ATOMIC-NEXT: j .LBB69_2 +; XTENSA-ATOMIC-NEXT: .LBB69_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB69_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB69_4 +; XTENSA-ATOMIC-NEXT: .LBB69_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a6, a15, a9 +; XTENSA-ATOMIC-NEXT: and a6, a6, a10 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB69_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB69_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a13, a13 +; XTENSA-ATOMIC-NEXT: j .LBB69_1 +; XTENSA-ATOMIC-NEXT: .LBB69_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI70_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI70_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB70_2 +; XTENSA-ATOMIC-NEXT: .LBB70_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB70_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB70_4 +; XTENSA-ATOMIC-NEXT: .LBB70_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB70_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB70_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB70_1 +; XTENSA-ATOMIC-NEXT: .LBB70_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or 
a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI71_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI71_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB71_2 +; XTENSA-ATOMIC-NEXT: .LBB71_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB71_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB71_4 +; XTENSA-ATOMIC-NEXT: .LBB71_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB71_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB71_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB71_1 +; XTENSA-ATOMIC-NEXT: .LBB71_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI72_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI72_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB72_2 +; XTENSA-ATOMIC-NEXT: .LBB72_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB72_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB72_4 +; XTENSA-ATOMIC-NEXT: .LBB72_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB72_1 +; 
XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB72_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB72_1 +; XTENSA-ATOMIC-NEXT: .LBB72_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI73_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI73_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB73_2 +; XTENSA-ATOMIC-NEXT: .LBB73_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB73_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB73_4 +; XTENSA-ATOMIC-NEXT: .LBB73_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB73_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB73_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB73_1 +; XTENSA-ATOMIC-NEXT: .LBB73_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI74_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI74_0 +; XTENSA-ATOMIC-NEXT: and a10, a3, a9 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a11 +; XTENSA-ATOMIC-NEXT: or a9, a10, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; 
XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB74_2 +; XTENSA-ATOMIC-NEXT: .LBB74_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB74_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB74_4 +; XTENSA-ATOMIC-NEXT: .LBB74_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB74_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB74_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB74_1 +; XTENSA-ATOMIC-NEXT: .LBB74_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI75_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI75_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB75_2 +; XTENSA-ATOMIC-NEXT: .LBB75_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB75_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB75_4 +; XTENSA-ATOMIC-NEXT: .LBB75_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB75_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB75_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB75_1 +; XTENSA-ATOMIC-NEXT: .LBB75_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI76_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: 
retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI76_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB76_2 +; XTENSA-ATOMIC-NEXT: .LBB76_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB76_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB76_4 +; XTENSA-ATOMIC-NEXT: .LBB76_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB76_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB76_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB76_1 +; XTENSA-ATOMIC-NEXT: .LBB76_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI77_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI77_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB77_2 +; XTENSA-ATOMIC-NEXT: .LBB77_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB77_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB77_4 +; XTENSA-ATOMIC-NEXT: .LBB77_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, 
a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB77_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB77_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB77_1 +; XTENSA-ATOMIC-NEXT: .LBB77_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI78_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI78_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB78_2 +; XTENSA-ATOMIC-NEXT: .LBB78_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB78_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB78_4 +; XTENSA-ATOMIC-NEXT: .LBB78_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB78_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB78_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB78_1 +; XTENSA-ATOMIC-NEXT: .LBB78_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI79_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI79_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a10 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a11, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a11 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: movi a11, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a10, a11 +; 
XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: movi a15, 1 +; XTENSA-ATOMIC-NEXT: j .LBB79_2 +; XTENSA-ATOMIC-NEXT: .LBB79_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB79_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a6, a6 +; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB79_4 +; XTENSA-ATOMIC-NEXT: .LBB79_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a6, a7, a12 +; XTENSA-ATOMIC-NEXT: and a5, a7, a9 +; XTENSA-ATOMIC-NEXT: xor a5, a5, a11 +; XTENSA-ATOMIC-NEXT: and a5, a5, a10 +; XTENSA-ATOMIC-NEXT: or a6, a6, a5 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0 +; XTENSA-ATOMIC-NEXT: or a5, a15, a15 +; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB79_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB79_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a5, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB79_1 +; XTENSA-ATOMIC-NEXT: .LBB79_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a6 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw nand ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI80_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI80_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB80_2 +; XTENSA-ATOMIC-NEXT: .LBB80_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB80_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB80_4 +; XTENSA-ATOMIC-NEXT: .LBB80_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB80_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB80_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB80_1 +; XTENSA-ATOMIC-NEXT: .LBB80_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI81_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; 
XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI81_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB81_2 +; XTENSA-ATOMIC-NEXT: .LBB81_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB81_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB81_4 +; XTENSA-ATOMIC-NEXT: .LBB81_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB81_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB81_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB81_1 +; XTENSA-ATOMIC-NEXT: .LBB81_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI82_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI82_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB82_2 +; XTENSA-ATOMIC-NEXT: .LBB82_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB82_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB82_4 +; XTENSA-ATOMIC-NEXT: .LBB82_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB82_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB82_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB82_1 +; XTENSA-ATOMIC-NEXT: .LBB82_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 
32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI83_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI83_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB83_2 +; XTENSA-ATOMIC-NEXT: .LBB83_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB83_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB83_4 +; XTENSA-ATOMIC-NEXT: .LBB83_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB83_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB83_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB83_1 +; XTENSA-ATOMIC-NEXT: .LBB83_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI84_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI84_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB84_2 +; XTENSA-ATOMIC-NEXT: .LBB84_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB84_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB84_4 +; XTENSA-ATOMIC-NEXT: .LBB84_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB84_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB84_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB84_1 +; XTENSA-ATOMIC-NEXT: .LBB84_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI85_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI85_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB85_2 +; XTENSA-ATOMIC-NEXT: .LBB85_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB85_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB85_4 +; XTENSA-ATOMIC-NEXT: .LBB85_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB85_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB85_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB85_1 +; XTENSA-ATOMIC-NEXT: .LBB85_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI86_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI86_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB86_2 +; XTENSA-ATOMIC-NEXT: .LBB86_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB86_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB86_4 +; XTENSA-ATOMIC-NEXT: .LBB86_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB86_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: 
Header=BB86_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB86_1 +; XTENSA-ATOMIC-NEXT: .LBB86_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI87_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI87_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB87_2 +; XTENSA-ATOMIC-NEXT: .LBB87_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB87_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB87_4 +; XTENSA-ATOMIC-NEXT: .LBB87_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB87_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB87_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB87_1 +; XTENSA-ATOMIC-NEXT: .LBB87_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI88_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI88_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB88_2 +; XTENSA-ATOMIC-NEXT: .LBB88_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB88_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB88_4 +; XTENSA-ATOMIC-NEXT: .LBB88_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; 
XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB88_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB88_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB88_1 +; XTENSA-ATOMIC-NEXT: .LBB88_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI89_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI89_0 +; XTENSA-ATOMIC-NEXT: and a9, a3, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a10 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB89_2 +; XTENSA-ATOMIC-NEXT: .LBB89_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB89_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a13, a13 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB89_4 +; XTENSA-ATOMIC-NEXT: .LBB89_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a13, a14, a9 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB89_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB89_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB89_1 +; XTENSA-ATOMIC-NEXT: .LBB89_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI90_0 +; XTENSA-NEXT: j .LBB90_2 +; XTENSA-NEXT: .LBB90_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB90_4 +; XTENSA-NEXT: .LBB90_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB90_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB90_2 Depth=1 +; 
XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB90_1 +; XTENSA-NEXT: .LBB90_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI90_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB90_2 +; XTENSA-ATOMIC-NEXT: .LBB90_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB90_6 +; XTENSA-ATOMIC-NEXT: .LBB90_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB90_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB90_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI90_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB90_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB90_1 +; XTENSA-ATOMIC-NEXT: .LBB90_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI91_0 +; XTENSA-NEXT: j .LBB91_2 +; XTENSA-NEXT: .LBB91_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB91_4 +; XTENSA-NEXT: .LBB91_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB91_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB91_1 +; XTENSA-NEXT: .LBB91_4: # %atomicrmw.end +; 
XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI91_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB91_2 +; XTENSA-ATOMIC-NEXT: .LBB91_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB91_6 +; XTENSA-ATOMIC-NEXT: .LBB91_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB91_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB91_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI91_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB91_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB91_1 +; XTENSA-ATOMIC-NEXT: .LBB91_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI92_0 +; XTENSA-NEXT: j .LBB92_2 +; XTENSA-NEXT: .LBB92_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB92_4 +; XTENSA-NEXT: .LBB92_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB92_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j 
.LBB92_1 +; XTENSA-NEXT: .LBB92_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI92_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB92_2 +; XTENSA-ATOMIC-NEXT: .LBB92_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB92_6 +; XTENSA-ATOMIC-NEXT: .LBB92_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB92_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB92_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI92_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB92_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB92_1 +; XTENSA-ATOMIC-NEXT: .LBB92_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI93_0 +; XTENSA-NEXT: j .LBB93_2 +; XTENSA-NEXT: .LBB93_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB93_4 +; XTENSA-NEXT: .LBB93_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: bge a4, a8, .LBB93_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB93_2 Depth=1 
+; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB93_1 +; XTENSA-NEXT: .LBB93_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI93_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB93_2 +; XTENSA-ATOMIC-NEXT: .LBB93_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB93_6 +; XTENSA-ATOMIC-NEXT: .LBB93_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB93_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB93_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI93_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB93_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB93_1 +; XTENSA-ATOMIC-NEXT: .LBB93_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI94_0 +; XTENSA-NEXT: j .LBB94_2 +; XTENSA-NEXT: .LBB94_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB94_4 +; XTENSA-NEXT: .LBB94_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a5, a8, .LBB94_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB94_1 
+; XTENSA-NEXT: .LBB94_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI94_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB94_2 +; XTENSA-ATOMIC-NEXT: .LBB94_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB94_6 +; XTENSA-ATOMIC-NEXT: .LBB94_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB94_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB94_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI94_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB94_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB94_1 +; XTENSA-ATOMIC-NEXT: .LBB94_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a4, .LCPI95_0 +; XTENSA-NEXT: j .LBB95_2 +; XTENSA-NEXT: .LBB95_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB95_4 +; XTENSA-NEXT: .LBB95_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB95_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB95_1 +; XTENSA-NEXT: .LBB95_4: # %atomicrmw.end +; 
XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI95_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB95_2 +; XTENSA-ATOMIC-NEXT: .LBB95_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB95_6 +; XTENSA-ATOMIC-NEXT: .LBB95_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB95_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB95_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI95_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB95_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB95_1 +; XTENSA-ATOMIC-NEXT: .LBB95_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a4, .LCPI96_0 +; XTENSA-NEXT: j .LBB96_2 +; XTENSA-NEXT: .LBB96_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB96_4 +; XTENSA-NEXT: .LBB96_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB96_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB96_1 +; XTENSA-NEXT: .LBB96_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; 
XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI96_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB96_2 +; XTENSA-ATOMIC-NEXT: .LBB96_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB96_6 +; XTENSA-ATOMIC-NEXT: .LBB96_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB96_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB96_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI96_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB96_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB96_1 +; XTENSA-ATOMIC-NEXT: .LBB96_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a5, .LCPI97_0 +; XTENSA-NEXT: j .LBB97_2 +; XTENSA-NEXT: .LBB97_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB97_4 +; XTENSA-NEXT: .LBB97_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB97_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB97_1 +; XTENSA-NEXT: .LBB97_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_min_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI97_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB97_2 +; XTENSA-ATOMIC-NEXT: .LBB97_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB97_6 +; XTENSA-ATOMIC-NEXT: .LBB97_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB97_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB97_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI97_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB97_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB97_1 +; XTENSA-ATOMIC-NEXT: .LBB97_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a9, a2, a2 +; XTENSA-NEXT: l16ui a2, a9, 0 +; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: or a3, a9, a9 +; XTENSA-NEXT: srai a4, a8, 16 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a5, .LCPI98_0 +; XTENSA-NEXT: j .LBB98_2 +; XTENSA-NEXT: .LBB98_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 4 +; XTENSA-NEXT: or a10, a3, a3 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 4 +; XTENSA-NEXT: bnez a10, .LBB98_4 +; XTENSA-NEXT: .LBB98_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 4 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: blt a4, a8, .LBB98_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB98_1 +; XTENSA-NEXT: .LBB98_4: # %atomicrmw.end 
+; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI98_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB98_2 +; XTENSA-ATOMIC-NEXT: .LBB98_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB98_6 +; XTENSA-ATOMIC-NEXT: .LBB98_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB98_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB98_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI98_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB98_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB98_1 +; XTENSA-ATOMIC-NEXT: .LBB98_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a5, a8, 16 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI99_0 +; XTENSA-NEXT: j .LBB99_2 +; XTENSA-NEXT: .LBB99_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB99_4 +; XTENSA-NEXT: .LBB99_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a5, a8, .LBB99_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB99_1 +; XTENSA-NEXT: .LBB99_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_min_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI99_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: slli a11, a3, 16 +; XTENSA-ATOMIC-NEXT: srai a11, a11, 16 +; XTENSA-ATOMIC-NEXT: movi a12, 0 +; XTENSA-ATOMIC-NEXT: movi a13, 1 +; XTENSA-ATOMIC-NEXT: j .LBB99_2 +; XTENSA-ATOMIC-NEXT: .LBB99_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a15, a15 +; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB99_6 +; XTENSA-ATOMIC-NEXT: .LBB99_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a14 +; XTENSA-ATOMIC-NEXT: slli a7, a15, 16 +; XTENSA-ATOMIC-NEXT: srai a6, a7, 16 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB99_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB99_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI99_0 +; XTENSA-ATOMIC-NEXT: and a15, a7, a15 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a15, a15 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: or a15, a7, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB99_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB99_1 +; XTENSA-ATOMIC-NEXT: .LBB99_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI100_1 +; XTENSA-NEXT: j .LBB100_2 +; XTENSA-NEXT: .LBB100_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB100_4 +; XTENSA-NEXT: .LBB100_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI100_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB100_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB100_1 +; XTENSA-NEXT: .LBB100_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; 
XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI100_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB100_2 +; XTENSA-ATOMIC-NEXT: .LBB100_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB100_6 +; XTENSA-ATOMIC-NEXT: .LBB100_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI100_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB100_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB100_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB100_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB100_1 +; XTENSA-ATOMIC-NEXT: .LBB100_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI101_1 +; XTENSA-NEXT: j .LBB101_2 +; XTENSA-NEXT: .LBB101_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB101_4 +; XTENSA-NEXT: .LBB101_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI101_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB101_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB101_1 +; XTENSA-NEXT: .LBB101_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI101_0 
+; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB101_2 +; XTENSA-ATOMIC-NEXT: .LBB101_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB101_6 +; XTENSA-ATOMIC-NEXT: .LBB101_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI101_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB101_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB101_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB101_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB101_1 +; XTENSA-ATOMIC-NEXT: .LBB101_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI102_1 +; XTENSA-NEXT: j .LBB102_2 +; XTENSA-NEXT: .LBB102_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB102_4 +; XTENSA-NEXT: .LBB102_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI102_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB102_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB102_1 +; XTENSA-NEXT: .LBB102_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI102_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, 
a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB102_2 +; XTENSA-ATOMIC-NEXT: .LBB102_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB102_6 +; XTENSA-ATOMIC-NEXT: .LBB102_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI102_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB102_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB102_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB102_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB102_1 +; XTENSA-ATOMIC-NEXT: .LBB102_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI103_1 +; XTENSA-NEXT: j .LBB103_2 +; XTENSA-NEXT: .LBB103_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB103_4 +; XTENSA-NEXT: .LBB103_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI103_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB103_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB103_1 +; XTENSA-NEXT: .LBB103_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI103_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i 
a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB103_2 +; XTENSA-ATOMIC-NEXT: .LBB103_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB103_6 +; XTENSA-ATOMIC-NEXT: .LBB103_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI103_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB103_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB103_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB103_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB103_1 +; XTENSA-ATOMIC-NEXT: .LBB103_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI104_1 +; XTENSA-NEXT: j .LBB104_2 +; XTENSA-NEXT: .LBB104_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB104_4 +; XTENSA-NEXT: .LBB104_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI104_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a9, a8, .LBB104_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB104_1 +; XTENSA-NEXT: .LBB104_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI104_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB104_2 +; 
XTENSA-ATOMIC-NEXT: .LBB104_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB104_6 +; XTENSA-ATOMIC-NEXT: .LBB104_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI104_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB104_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB104_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB104_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB104_1 +; XTENSA-ATOMIC-NEXT: .LBB104_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI105_1 +; XTENSA-NEXT: j .LBB105_2 +; XTENSA-NEXT: .LBB105_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB105_4 +; XTENSA-NEXT: .LBB105_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI105_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB105_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB105_1 +; XTENSA-NEXT: .LBB105_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI105_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB105_2 +; XTENSA-ATOMIC-NEXT: .LBB105_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; 
XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB105_6 +; XTENSA-ATOMIC-NEXT: .LBB105_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI105_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB105_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB105_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB105_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB105_1 +; XTENSA-ATOMIC-NEXT: .LBB105_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b monotonic + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI106_1 +; XTENSA-NEXT: j .LBB106_2 +; XTENSA-NEXT: .LBB106_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB106_4 +; XTENSA-NEXT: .LBB106_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI106_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB106_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB106_1 +; XTENSA-NEXT: .LBB106_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI106_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB106_2 +; XTENSA-ATOMIC-NEXT: .LBB106_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB106_6 +; XTENSA-ATOMIC-NEXT: .LBB106_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, 
.LCPI106_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB106_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB106_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB106_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB106_1 +; XTENSA-ATOMIC-NEXT: .LBB106_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b acquire + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI107_1 +; XTENSA-NEXT: j .LBB107_2 +; XTENSA-NEXT: .LBB107_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB107_4 +; XTENSA-NEXT: .LBB107_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI107_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB107_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB107_1 +; XTENSA-NEXT: .LBB107_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI107_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB107_2 +; XTENSA-ATOMIC-NEXT: .LBB107_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB107_6 +; XTENSA-ATOMIC-NEXT: .LBB107_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI107_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl 
a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB107_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB107_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB107_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB107_1 +; XTENSA-ATOMIC-NEXT: .LBB107_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b release + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l16ui a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI108_1 +; XTENSA-NEXT: j .LBB108_2 +; XTENSA-NEXT: .LBB108_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB108_4 +; XTENSA-NEXT: .LBB108_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI108_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB108_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB108_1 +; XTENSA-NEXT: .LBB108_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI108_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB108_2 +; XTENSA-ATOMIC-NEXT: .LBB108_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB108_6 +; XTENSA-ATOMIC-NEXT: .LBB108_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI108_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB108_4 +; 
XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB108_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB108_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB108_1 +; XTENSA-ATOMIC-NEXT: .LBB108_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b acq_rel + ret i16 %res +} + +define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i16_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l16ui a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI109_1 +; XTENSA-NEXT: j .LBB109_2 +; XTENSA-NEXT: .LBB109_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB109_4 +; XTENSA-NEXT: .LBB109_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: l32r a8, .LCPI109_0 +; XTENSA-NEXT: and a9, a3, a8 +; XTENSA-NEXT: s16i a2, a1, 0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a9, a8, .LBB109_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB109_1 +; XTENSA-NEXT: .LBB109_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI109_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB109_2 +; XTENSA-ATOMIC-NEXT: .LBB109_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a14, a14 +; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB109_6 +; XTENSA-ATOMIC-NEXT: .LBB109_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI109_0 +; XTENSA-ATOMIC-NEXT: and a6, a3, a14 +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a15, a13 +; XTENSA-ATOMIC-NEXT: and a5, a15, a14 +; XTENSA-ATOMIC-NEXT: or a7, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB109_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, 
a15, a15 +; XTENSA-ATOMIC-NEXT: .LBB109_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a7, a14 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a14, a14 +; XTENSA-ATOMIC-NEXT: and a15, a13, a9 +; XTENSA-ATOMIC-NEXT: or a14, a15, a14 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a15, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB109_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB109_1 +; XTENSA-ATOMIC-NEXT: .LBB109_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i16 %b seq_cst + ret i16 %res +} + +define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI110_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB110_2 +; XTENSA-ATOMIC-NEXT: .LBB110_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB110_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB110_4 +; XTENSA-ATOMIC-NEXT: .LBB110_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB110_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB110_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB110_1 +; XTENSA-ATOMIC-NEXT: .LBB110_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI111_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB111_2 +; XTENSA-ATOMIC-NEXT: .LBB111_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB111_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB111_4 +; XTENSA-ATOMIC-NEXT: .LBB111_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB111_1 +; 
XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB111_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB111_1 +; XTENSA-ATOMIC-NEXT: .LBB111_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI112_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB112_2 +; XTENSA-ATOMIC-NEXT: .LBB112_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB112_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB112_4 +; XTENSA-ATOMIC-NEXT: .LBB112_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB112_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB112_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB112_1 +; XTENSA-ATOMIC-NEXT: .LBB112_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI113_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB113_2 +; XTENSA-ATOMIC-NEXT: .LBB113_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB113_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB113_4 +; XTENSA-ATOMIC-NEXT: .LBB113_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB113_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB113_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB113_1 +; XTENSA-ATOMIC-NEXT: .LBB113_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind { +; 
XTENSA-LABEL: atomicrmw_xchg_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI114_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB114_2 +; XTENSA-ATOMIC-NEXT: .LBB114_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB114_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB114_4 +; XTENSA-ATOMIC-NEXT: .LBB114_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB114_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB114_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB114_1 +; XTENSA-ATOMIC-NEXT: .LBB114_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xchg ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI115_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB115_2 +; XTENSA-ATOMIC-NEXT: .LBB115_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB115_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB115_4 +; XTENSA-ATOMIC-NEXT: .LBB115_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB115_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB115_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB115_1 +; XTENSA-ATOMIC-NEXT: .LBB115_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI116_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; 
XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB116_2 +; XTENSA-ATOMIC-NEXT: .LBB116_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB116_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB116_4 +; XTENSA-ATOMIC-NEXT: .LBB116_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB116_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB116_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB116_1 +; XTENSA-ATOMIC-NEXT: .LBB116_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI117_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB117_2 +; XTENSA-ATOMIC-NEXT: .LBB117_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB117_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB117_4 +; XTENSA-ATOMIC-NEXT: .LBB117_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB117_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB117_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB117_1 +; XTENSA-ATOMIC-NEXT: .LBB117_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI118_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB118_2 +; XTENSA-ATOMIC-NEXT: .LBB118_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB118_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB118_4 +; XTENSA-ATOMIC-NEXT: .LBB118_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; 
XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB118_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB118_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB118_1 +; XTENSA-ATOMIC-NEXT: .LBB118_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI119_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB119_2 +; XTENSA-ATOMIC-NEXT: .LBB119_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB119_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB119_4 +; XTENSA-ATOMIC-NEXT: .LBB119_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: add a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB119_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB119_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB119_1 +; XTENSA-ATOMIC-NEXT: .LBB119_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw add ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI120_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB120_2 +; XTENSA-ATOMIC-NEXT: .LBB120_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB120_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB120_4 +; XTENSA-ATOMIC-NEXT: .LBB120_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB120_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB120_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB120_1 +; XTENSA-ATOMIC-NEXT: .LBB120_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b 
monotonic + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI121_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB121_2 +; XTENSA-ATOMIC-NEXT: .LBB121_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB121_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB121_4 +; XTENSA-ATOMIC-NEXT: .LBB121_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB121_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB121_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB121_1 +; XTENSA-ATOMIC-NEXT: .LBB121_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI122_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB122_2 +; XTENSA-ATOMIC-NEXT: .LBB122_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB122_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB122_4 +; XTENSA-ATOMIC-NEXT: .LBB122_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB122_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB122_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB122_1 +; XTENSA-ATOMIC-NEXT: .LBB122_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI123_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: 
entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB123_2 +; XTENSA-ATOMIC-NEXT: .LBB123_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB123_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB123_4 +; XTENSA-ATOMIC-NEXT: .LBB123_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB123_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB123_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB123_1 +; XTENSA-ATOMIC-NEXT: .LBB123_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI124_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB124_2 +; XTENSA-ATOMIC-NEXT: .LBB124_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB124_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB124_4 +; XTENSA-ATOMIC-NEXT: .LBB124_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: sub a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB124_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB124_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB124_1 +; XTENSA-ATOMIC-NEXT: .LBB124_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw sub ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI125_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB125_2 +; XTENSA-ATOMIC-NEXT: .LBB125_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB125_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB125_4 +; XTENSA-ATOMIC-NEXT: .LBB125_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This 
Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB125_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB125_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB125_1 +; XTENSA-ATOMIC-NEXT: .LBB125_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI126_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB126_2 +; XTENSA-ATOMIC-NEXT: .LBB126_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB126_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB126_4 +; XTENSA-ATOMIC-NEXT: .LBB126_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB126_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB126_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB126_1 +; XTENSA-ATOMIC-NEXT: .LBB126_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI127_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB127_2 +; XTENSA-ATOMIC-NEXT: .LBB127_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB127_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB127_4 +; XTENSA-ATOMIC-NEXT: .LBB127_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB127_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB127_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB127_1 +; XTENSA-ATOMIC-NEXT: .LBB127_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, 
a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI128_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB128_2 +; XTENSA-ATOMIC-NEXT: .LBB128_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB128_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB128_4 +; XTENSA-ATOMIC-NEXT: .LBB128_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB128_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB128_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB128_1 +; XTENSA-ATOMIC-NEXT: .LBB128_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI129_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB129_2 +; XTENSA-ATOMIC-NEXT: .LBB129_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB129_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB129_4 +; XTENSA-ATOMIC-NEXT: .LBB129_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB129_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB129_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB129_1 +; XTENSA-ATOMIC-NEXT: .LBB129_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw and ptr %a, i32 %b seq_cst + ret i32 %res +} + +;define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b monotonic +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b acquire +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) 
nounwind { +; %res = atomicrmw nand ptr %a, i32 %b release +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b acq_rel +; ret i32 %res +;} +; +;define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind { +; %res = atomicrmw nand ptr %a, i32 %b seq_cst +; ret i32 %res +;} + +define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI130_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB130_2 +; XTENSA-ATOMIC-NEXT: .LBB130_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB130_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB130_4 +; XTENSA-ATOMIC-NEXT: .LBB130_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB130_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB130_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB130_1 +; XTENSA-ATOMIC-NEXT: .LBB130_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI131_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB131_2 +; XTENSA-ATOMIC-NEXT: .LBB131_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB131_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB131_4 +; XTENSA-ATOMIC-NEXT: .LBB131_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB131_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB131_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB131_1 +; XTENSA-ATOMIC-NEXT: .LBB131_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; 
XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI132_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB132_2 +; XTENSA-ATOMIC-NEXT: .LBB132_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB132_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB132_4 +; XTENSA-ATOMIC-NEXT: .LBB132_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB132_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB132_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB132_1 +; XTENSA-ATOMIC-NEXT: .LBB132_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI133_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB133_2 +; XTENSA-ATOMIC-NEXT: .LBB133_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB133_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB133_4 +; XTENSA-ATOMIC-NEXT: .LBB133_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB133_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB133_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB133_1 +; XTENSA-ATOMIC-NEXT: .LBB133_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI134_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j 
.LBB134_2 +; XTENSA-ATOMIC-NEXT: .LBB134_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB134_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB134_4 +; XTENSA-ATOMIC-NEXT: .LBB134_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB134_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB134_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB134_1 +; XTENSA-ATOMIC-NEXT: .LBB134_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw or ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI135_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB135_2 +; XTENSA-ATOMIC-NEXT: .LBB135_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB135_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB135_4 +; XTENSA-ATOMIC-NEXT: .LBB135_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB135_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB135_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB135_1 +; XTENSA-ATOMIC-NEXT: .LBB135_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI136_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB136_2 +; XTENSA-ATOMIC-NEXT: .LBB136_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB136_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB136_4 +; XTENSA-ATOMIC-NEXT: .LBB136_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, 
.LBB136_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB136_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB136_1 +; XTENSA-ATOMIC-NEXT: .LBB136_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI137_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB137_2 +; XTENSA-ATOMIC-NEXT: .LBB137_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB137_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB137_4 +; XTENSA-ATOMIC-NEXT: .LBB137_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB137_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB137_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB137_1 +; XTENSA-ATOMIC-NEXT: .LBB137_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI138_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB138_2 +; XTENSA-ATOMIC-NEXT: .LBB138_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB138_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB138_4 +; XTENSA-ATOMIC-NEXT: .LBB138_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB138_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB138_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB138_1 +; XTENSA-ATOMIC-NEXT: .LBB138_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind { +; 
XTENSA-LABEL: atomicrmw_xor_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a11, a3, a3 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI139_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB139_2 +; XTENSA-ATOMIC-NEXT: .LBB139_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB139_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB139_4 +; XTENSA-ATOMIC-NEXT: .LBB139_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a3 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB139_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB139_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB139_1 +; XTENSA-ATOMIC-NEXT: .LBB139_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw xor ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI140_0 +; XTENSA-NEXT: j .LBB140_2 +; XTENSA-NEXT: .LBB140_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB140_4 +; XTENSA-NEXT: .LBB140_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB140_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB140_1 +; XTENSA-NEXT: .LBB140_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB140_2 +; XTENSA-ATOMIC-NEXT: .LBB140_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB140_6 +; XTENSA-ATOMIC-NEXT: .LBB140_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB140_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB140_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, 
a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB140_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB140_1 +; XTENSA-ATOMIC-NEXT: .LBB140_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI141_0 +; XTENSA-NEXT: j .LBB141_2 +; XTENSA-NEXT: .LBB141_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB141_4 +; XTENSA-NEXT: .LBB141_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB141_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB141_1 +; XTENSA-NEXT: .LBB141_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB141_2 +; XTENSA-ATOMIC-NEXT: .LBB141_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB141_6 +; XTENSA-ATOMIC-NEXT: .LBB141_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB141_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB141_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB141_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB141_1 +; XTENSA-ATOMIC-NEXT: .LBB141_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI142_0 +; XTENSA-NEXT: j .LBB142_2 +; XTENSA-NEXT: .LBB142_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; 
XTENSA-NEXT: bnez a10, .LBB142_4 +; XTENSA-NEXT: .LBB142_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB142_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB142_1 +; XTENSA-NEXT: .LBB142_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB142_2 +; XTENSA-ATOMIC-NEXT: .LBB142_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB142_6 +; XTENSA-ATOMIC-NEXT: .LBB142_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB142_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB142_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB142_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB142_1 +; XTENSA-ATOMIC-NEXT: .LBB142_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI143_0 +; XTENSA-NEXT: j .LBB143_2 +; XTENSA-NEXT: .LBB143_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB143_4 +; XTENSA-NEXT: .LBB143_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB143_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB143_1 +; XTENSA-NEXT: .LBB143_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB143_2 +; XTENSA-ATOMIC-NEXT: .LBB143_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB143_6 +; XTENSA-ATOMIC-NEXT: .LBB143_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner 
Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB143_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB143_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB143_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB143_1 +; XTENSA-ATOMIC-NEXT: .LBB143_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_max_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI144_0 +; XTENSA-NEXT: j .LBB144_2 +; XTENSA-NEXT: .LBB144_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB144_4 +; XTENSA-NEXT: .LBB144_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bge a3, a2, .LBB144_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB144_1 +; XTENSA-NEXT: .LBB144_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB144_2 +; XTENSA-ATOMIC-NEXT: .LBB144_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB144_6 +; XTENSA-ATOMIC-NEXT: .LBB144_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB144_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB144_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB144_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB144_1 +; XTENSA-ATOMIC-NEXT: .LBB144_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw max ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: 
atomicrmw_min_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI145_0 +; XTENSA-NEXT: j .LBB145_2 +; XTENSA-NEXT: .LBB145_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB145_4 +; XTENSA-NEXT: .LBB145_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB145_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB145_1 +; XTENSA-NEXT: .LBB145_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB145_2 +; XTENSA-ATOMIC-NEXT: .LBB145_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB145_6 +; XTENSA-ATOMIC-NEXT: .LBB145_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB145_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB145_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB145_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB145_1 +; XTENSA-ATOMIC-NEXT: .LBB145_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI146_0 +; XTENSA-NEXT: j .LBB146_2 +; XTENSA-NEXT: .LBB146_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB146_4 +; XTENSA-NEXT: .LBB146_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB146_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB146_1 +; XTENSA-NEXT: .LBB146_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry 
a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB146_2 +; XTENSA-ATOMIC-NEXT: .LBB146_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB146_6 +; XTENSA-ATOMIC-NEXT: .LBB146_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB146_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB146_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB146_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB146_1 +; XTENSA-ATOMIC-NEXT: .LBB146_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI147_0 +; XTENSA-NEXT: j .LBB147_2 +; XTENSA-NEXT: .LBB147_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB147_4 +; XTENSA-NEXT: .LBB147_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB147_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB147_1 +; XTENSA-NEXT: .LBB147_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB147_2 +; XTENSA-ATOMIC-NEXT: .LBB147_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB147_6 +; XTENSA-ATOMIC-NEXT: .LBB147_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB147_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB147_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB147_1 +; XTENSA-ATOMIC-NEXT: # 
%bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB147_1 +; XTENSA-ATOMIC-NEXT: .LBB147_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI148_0 +; XTENSA-NEXT: j .LBB148_2 +; XTENSA-NEXT: .LBB148_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB148_4 +; XTENSA-NEXT: .LBB148_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB148_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB148_1 +; XTENSA-NEXT: .LBB148_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB148_2 +; XTENSA-ATOMIC-NEXT: .LBB148_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB148_6 +; XTENSA-ATOMIC-NEXT: .LBB148_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB148_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB148_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB148_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB148_1 +; XTENSA-ATOMIC-NEXT: .LBB148_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_min_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI149_0 +; XTENSA-NEXT: j .LBB149_2 +; XTENSA-NEXT: .LBB149_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB149_4 +; XTENSA-NEXT: .LBB149_2: # 
%atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: blt a3, a2, .LBB149_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB149_1 +; XTENSA-NEXT: .LBB149_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB149_2 +; XTENSA-ATOMIC-NEXT: .LBB149_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB149_6 +; XTENSA-ATOMIC-NEXT: .LBB149_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB149_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB149_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB149_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB149_1 +; XTENSA-ATOMIC-NEXT: .LBB149_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw min ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI150_0 +; XTENSA-NEXT: j .LBB150_2 +; XTENSA-NEXT: .LBB150_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB150_4 +; XTENSA-NEXT: .LBB150_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB150_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB150_1 +; XTENSA-NEXT: .LBB150_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB150_2 +; XTENSA-ATOMIC-NEXT: .LBB150_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB150_6 +; XTENSA-ATOMIC-NEXT: .LBB150_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; 
XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB150_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB150_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB150_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB150_1 +; XTENSA-ATOMIC-NEXT: .LBB150_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI151_0 +; XTENSA-NEXT: j .LBB151_2 +; XTENSA-NEXT: .LBB151_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB151_4 +; XTENSA-NEXT: .LBB151_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB151_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB151_1 +; XTENSA-NEXT: .LBB151_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB151_2 +; XTENSA-ATOMIC-NEXT: .LBB151_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB151_6 +; XTENSA-ATOMIC-NEXT: .LBB151_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB151_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB151_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB151_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB151_1 +; XTENSA-ATOMIC-NEXT: .LBB151_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: 
l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI152_0 +; XTENSA-NEXT: j .LBB152_2 +; XTENSA-NEXT: .LBB152_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB152_4 +; XTENSA-NEXT: .LBB152_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB152_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB152_1 +; XTENSA-NEXT: .LBB152_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB152_2 +; XTENSA-ATOMIC-NEXT: .LBB152_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB152_6 +; XTENSA-ATOMIC-NEXT: .LBB152_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB152_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB152_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB152_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB152_1 +; XTENSA-ATOMIC-NEXT: .LBB152_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI153_0 +; XTENSA-NEXT: j .LBB153_2 +; XTENSA-NEXT: .LBB153_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB153_4 +; XTENSA-NEXT: .LBB153_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB153_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB153_1 +; XTENSA-NEXT: .LBB153_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; 
XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB153_2 +; XTENSA-ATOMIC-NEXT: .LBB153_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB153_6 +; XTENSA-ATOMIC-NEXT: .LBB153_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB153_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB153_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB153_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB153_1 +; XTENSA-ATOMIC-NEXT: .LBB153_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umax_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI154_0 +; XTENSA-NEXT: j .LBB154_2 +; XTENSA-NEXT: .LBB154_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB154_4 +; XTENSA-NEXT: .LBB154_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bgeu a3, a2, .LBB154_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB154_1 +; XTENSA-NEXT: .LBB154_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB154_2 +; XTENSA-ATOMIC-NEXT: .LBB154_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB154_6 +; XTENSA-ATOMIC-NEXT: .LBB154_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB154_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB154_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB154_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB154_1 +; XTENSA-ATOMIC-NEXT: .LBB154_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umax ptr %a, i32 %b seq_cst + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 0 +; XTENSA-NEXT: l32r a5, .LCPI155_0 +; XTENSA-NEXT: j .LBB155_2 +; XTENSA-NEXT: .LBB155_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB155_4 +; XTENSA-NEXT: .LBB155_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB155_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB155_1 +; XTENSA-NEXT: .LBB155_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB155_2 +; XTENSA-ATOMIC-NEXT: .LBB155_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB155_6 +; XTENSA-ATOMIC-NEXT: .LBB155_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB155_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB155_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB155_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB155_1 +; XTENSA-ATOMIC-NEXT: .LBB155_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b monotonic + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 2 +; XTENSA-NEXT: l32r a5, .LCPI156_0 +; XTENSA-NEXT: j .LBB156_2 +; XTENSA-NEXT: .LBB156_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB156_4 +; XTENSA-NEXT: .LBB156_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 
+; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB156_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB156_1 +; XTENSA-NEXT: .LBB156_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB156_2 +; XTENSA-ATOMIC-NEXT: .LBB156_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB156_6 +; XTENSA-ATOMIC-NEXT: .LBB156_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB156_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB156_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB156_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB156_1 +; XTENSA-ATOMIC-NEXT: .LBB156_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b acquire + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 3 +; XTENSA-NEXT: movi a6, 0 +; XTENSA-NEXT: l32r a4, .LCPI157_0 +; XTENSA-NEXT: j .LBB157_2 +; XTENSA-NEXT: .LBB157_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB157_4 +; XTENSA-NEXT: .LBB157_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB157_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB157_1 +; XTENSA-NEXT: .LBB157_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB157_2 +; XTENSA-ATOMIC-NEXT: .LBB157_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB157_6 +; XTENSA-ATOMIC-NEXT: .LBB157_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB157_4 +; 
XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB157_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB157_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB157_1 +; XTENSA-ATOMIC-NEXT: .LBB157_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b release + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_acq_rel: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a5, a2, a2 +; XTENSA-NEXT: l32i a2, a5, 0 +; XTENSA-NEXT: movi a7, 4 +; XTENSA-NEXT: movi a6, 2 +; XTENSA-NEXT: l32r a4, .LCPI158_0 +; XTENSA-NEXT: j .LBB158_2 +; XTENSA-NEXT: .LBB158_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a5, a5 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a6, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB158_4 +; XTENSA-NEXT: .LBB158_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB158_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB158_1 +; XTENSA-NEXT: .LBB158_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_acq_rel: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB158_2 +; XTENSA-ATOMIC-NEXT: .LBB158_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB158_6 +; XTENSA-ATOMIC-NEXT: .LBB158_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB158_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB158_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB158_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB158_1 +; XTENSA-ATOMIC-NEXT: .LBB158_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b acq_rel + ret i32 %res +} + +define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_umin_i32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; 
XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI159_0 +; XTENSA-NEXT: j .LBB159_2 +; XTENSA-NEXT: .LBB159_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB159_4 +; XTENSA-NEXT: .LBB159_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a3, a3 +; XTENSA-NEXT: bltu a3, a2, .LBB159_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB159_1 +; XTENSA-NEXT: .LBB159_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB159_2 +; XTENSA-ATOMIC-NEXT: .LBB159_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB159_6 +; XTENSA-ATOMIC-NEXT: .LBB159_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a3, a3 +; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB159_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB159_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB159_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB159_1 +; XTENSA-ATOMIC-NEXT: .LBB159_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = atomicrmw umin ptr %a, i32 %b seq_cst + ret i32 %res +} diff --git a/llvm/test/CodeGen/Xtensa/forced-atomics.ll b/llvm/test/CodeGen/Xtensa/forced-atomics.ll new file mode 100644 index 0000000..eeec87b --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/forced-atomics.ll @@ -0,0 +1,1426 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i -mattr=+forced-atomics < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @load8(ptr %p) nounwind { +; XTENSA-LABEL: load8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i8, ptr %p seq_cst, align 1 + ret i8 %v +} + +define void @store8(ptr %p) nounwind { +; XTENSA-LABEL: store8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; 
XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s8i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i8 0, ptr %p seq_cst, align 1 + ret void +} + +define i8 @rmw8(ptr %p) nounwind { +; XTENSA-LABEL: rmw8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: movi a11, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: j .LBB2_2 +; XTENSA-ATOMIC-NEXT: .LBB2_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB2_4 +; XTENSA-ATOMIC-NEXT: .LBB2_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: add a6, a15, a10 +; XTENSA-ATOMIC-NEXT: and a6, a6, a11 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: or a6, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB2_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB2_1 +; XTENSA-ATOMIC-NEXT: .LBB2_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i8 1 seq_cst, align 1 + ret i8 %v +} + +define i8 @cmpxchg8(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg8: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a8, 0 +; XTENSA-NEXT: s8i a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: movi a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l8ui a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg8: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a10, 0 +; XTENSA-ATOMIC-NEXT: and a7, a11, a9 +; XTENSA-ATOMIC-NEXT: movi 
a11, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a12, a11 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: .LBB3_1: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: or a14, a15, a12 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a11, a11 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB3_3 +; XTENSA-ATOMIC-NEXT: # %bb.2: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: .LBB3_3: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: bnez a7, .LBB3_5 +; XTENSA-ATOMIC-NEXT: # %bb.4: # %partword.cmpxchg.failure +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: bne a15, a7, .LBB3_1 +; XTENSA-ATOMIC-NEXT: .LBB3_5: # %partword.cmpxchg.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i8 0, i8 1 seq_cst seq_cst + %res.0 = extractvalue { i8, i1 } %res, 0 + ret i8 %res.0 +} + +define i16 @load16(ptr %p) nounwind { +; XTENSA-LABEL: load16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i16, ptr %p seq_cst, align 2 + ret i16 %v +} + +define void @store16(ptr %p) nounwind { +; XTENSA-LABEL: store16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s16i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i16 0, ptr %p seq_cst, align 2 + ret void +} + +define i16 @rmw16(ptr %p) nounwind { +; XTENSA-LABEL: rmw16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a10, a9 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI6_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a11 +; XTENSA-ATOMIC-NEXT: movi a12, -1 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: movi a13, -4 +; XTENSA-ATOMIC-NEXT: and a13, a2, a13 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a15, a13, 0 +; XTENSA-ATOMIC-NEXT: movi a14, 0 +; XTENSA-ATOMIC-NEXT: j .LBB6_2 +; XTENSA-ATOMIC-NEXT: .LBB6_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: 
or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB6_4 +; XTENSA-ATOMIC-NEXT: .LBB6_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: add a6, a15, a10 +; XTENSA-ATOMIC-NEXT: and a6, a6, a11 +; XTENSA-ATOMIC-NEXT: or a7, a7, a6 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a13, 0 +; XTENSA-ATOMIC-NEXT: or a6, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB6_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a6, a14, a14 +; XTENSA-ATOMIC-NEXT: j .LBB6_1 +; XTENSA-ATOMIC-NEXT: .LBB6_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i16 1 seq_cst, align 2 + ret i16 %v +} + +define i16 @cmpxchg16(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg16: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a8, 0 +; XTENSA-NEXT: s16i a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: movi a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l16ui a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg16: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: slli a8, a2, 3 +; XTENSA-ATOMIC-NEXT: movi a9, 24 +; XTENSA-ATOMIC-NEXT: and a8, a8, a9 +; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI7_0 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a9, a9 +; XTENSA-ATOMIC-NEXT: movi a10, -1 +; XTENSA-ATOMIC-NEXT: xor a9, a9, a10 +; XTENSA-ATOMIC-NEXT: movi a10, -4 +; XTENSA-ATOMIC-NEXT: and a10, a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a10, 0 +; XTENSA-ATOMIC-NEXT: and a7, a11, a9 +; XTENSA-ATOMIC-NEXT: movi a11, 1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a12, a11 +; XTENSA-ATOMIC-NEXT: movi a13, 0 +; XTENSA-ATOMIC-NEXT: .LBB7_1: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a15, a7, a7 +; XTENSA-ATOMIC-NEXT: or a14, a15, a12 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0 +; XTENSA-ATOMIC-NEXT: or a7, a11, a11 +; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB7_3 +; XTENSA-ATOMIC-NEXT: # %bb.2: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a13, a13 +; XTENSA-ATOMIC-NEXT: .LBB7_3: # %partword.cmpxchg.loop +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: bnez a7, .LBB7_5 +; XTENSA-ATOMIC-NEXT: # %bb.4: # %partword.cmpxchg.failure +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a7, a14, a9 +; XTENSA-ATOMIC-NEXT: bne a15, a7, .LBB7_1 +; XTENSA-ATOMIC-NEXT: .LBB7_5: # %partword.cmpxchg.end +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a2, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i16 0, i16 1 seq_cst seq_cst + %res.0 = extractvalue { i16, i1 } %res, 0 + ret i16 %res.0 +} + +define i32 @load32_unordered(ptr %p) nounwind { +; XTENSA-LABEL: load32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: 
load32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p unordered, align 4 + ret i32 %v +} + +define i32 @load32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: load32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p monotonic, align 4 + ret i32 %v +} + +define i32 @load32_acquire(ptr %p) nounwind { +; XTENSA-LABEL: load32_acquire: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_acquire: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p acquire, align 4 + ret i32 %v +} + +define i32 @load32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: load32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: load32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + %v = load atomic i32, ptr %p seq_cst, align 4 + ret i32 %v +} + +define void @store32_unordered(ptr %p) nounwind { +; XTENSA-LABEL: store32_unordered: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: or a12, a11, a11 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store32_unordered: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 0, ptr %p unordered, align 4 + ret void +} + +define void @store32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: store32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: or a12, a11, a11 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 0, ptr %p monotonic, align 4 + ret void +} + +define void @store32_release(ptr %p) nounwind { +; XTENSA-LABEL: store32_release: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store32_release: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; 
XTENSA-ATOMIC-NEXT: s32i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 0, ptr %p release, align 4 + ret void +} + +define void @store32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: store32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 0 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: store32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 0 +; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw + store atomic i32 0, ptr %p seq_cst, align 4 + ret void +} + +define i32 @rmw32_add_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_add_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_add_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB16_2 +; XTENSA-ATOMIC-NEXT: .LBB16_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB16_4 +; XTENSA-ATOMIC-NEXT: .LBB16_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: addi a8, a11, 1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB16_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB16_1 +; XTENSA-ATOMIC-NEXT: .LBB16_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i32 1 monotonic, align 4 + ret i32 %v +} + +define i32 @rmw32_add_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_add_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_add_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB17_2 +; XTENSA-ATOMIC-NEXT: .LBB17_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB17_4 +; XTENSA-ATOMIC-NEXT: .LBB17_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: addi a8, a11, 1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB17_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB17_1 +; XTENSA-ATOMIC-NEXT: .LBB17_4: # 
%atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw add ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_sub_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_sub_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_sub_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: movi a10, 1 +; XTENSA-ATOMIC-NEXT: j .LBB18_2 +; XTENSA-ATOMIC-NEXT: .LBB18_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB18_4 +; XTENSA-ATOMIC-NEXT: .LBB18_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: addi a8, a11, -1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB18_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: j .LBB18_1 +; XTENSA-ATOMIC-NEXT: .LBB18_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw sub ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_and_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_and_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_and_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB19_2 +; XTENSA-ATOMIC-NEXT: .LBB19_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB19_4 +; XTENSA-ATOMIC-NEXT: .LBB19_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: and a8, a11, a9 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB19_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB19_1 +; XTENSA-ATOMIC-NEXT: .LBB19_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw and ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_nand_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_nand_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; 
XTENSA-ATOMIC-LABEL: rmw32_nand_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a13, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, -1 +; XTENSA-ATOMIC-NEXT: movi a10, -2 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: movi a12, 1 +; XTENSA-ATOMIC-NEXT: j .LBB20_2 +; XTENSA-ATOMIC-NEXT: .LBB20_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a14, 1, .LBB20_4 +; XTENSA-ATOMIC-NEXT: .LBB20_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a13, a9 +; XTENSA-ATOMIC-NEXT: or a8, a8, a10 +; XTENSA-ATOMIC-NEXT: wsr a13, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a14, a12, a12 +; XTENSA-ATOMIC-NEXT: beq a8, a13, .LBB20_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a14, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB20_1 +; XTENSA-ATOMIC-NEXT: .LBB20_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw nand ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_or_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_or_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_or_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB21_2 +; XTENSA-ATOMIC-NEXT: .LBB21_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB21_4 +; XTENSA-ATOMIC-NEXT: .LBB21_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a9 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB21_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB21_1 +; XTENSA-ATOMIC-NEXT: .LBB21_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw or ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_xor_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_xor_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_xor_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB22_2 +; XTENSA-ATOMIC-NEXT: .LBB22_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, 
a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB22_4 +; XTENSA-ATOMIC-NEXT: .LBB22_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: xor a8, a11, a9 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB22_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB22_1 +; XTENSA-ATOMIC-NEXT: .LBB22_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw xor ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_max_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_max_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a5, 1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI23_0 +; XTENSA-NEXT: j .LBB23_2 +; XTENSA-NEXT: .LBB23_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB23_4 +; XTENSA-NEXT: .LBB23_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: bge a5, a2, .LBB23_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB23_1 +; XTENSA-NEXT: .LBB23_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_max_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB23_2 +; XTENSA-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB23_6 +; XTENSA-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: bge a9, a11, .LBB23_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB23_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB23_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB23_1 +; XTENSA-ATOMIC-NEXT: .LBB23_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw max ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_min_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_min_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a12, a2, 0 +; XTENSA-NEXT: movi a6, 1 +; XTENSA-NEXT: movi a5, 2 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI24_0 +; 
XTENSA-NEXT: j .LBB24_2 +; XTENSA-NEXT: .LBB24_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a12, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB24_4 +; XTENSA-NEXT: .LBB24_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a12, a1, 0 +; XTENSA-NEXT: blt a12, a5, .LBB24_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-NEXT: or a12, a6, a6 +; XTENSA-NEXT: j .LBB24_1 +; XTENSA-NEXT: .LBB24_4: # %atomicrmw.end +; XTENSA-NEXT: or a2, a12, a12 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_min_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a12, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 2 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: or a8, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB24_2 +; XTENSA-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a13, 1, .LBB24_6 +; XTENSA-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: blt a12, a10, .LBB24_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: .LBB24_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a12, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a13, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a12, .LBB24_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a11, a11 +; XTENSA-ATOMIC-NEXT: j .LBB24_1 +; XTENSA-ATOMIC-NEXT: .LBB24_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw min ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_umax_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_umax_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a6, a2, a2 +; XTENSA-NEXT: l32i a2, a6, 0 +; XTENSA-NEXT: movi a5, 1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI25_0 +; XTENSA-NEXT: j .LBB25_2 +; XTENSA-NEXT: .LBB25_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a6, a6 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB25_4 +; XTENSA-NEXT: .LBB25_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a2, a1, 0 +; XTENSA-NEXT: or a12, a5, a5 +; XTENSA-NEXT: bgeu a5, a2, .LBB25_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-NEXT: or a12, a2, a2 +; XTENSA-NEXT: j .LBB25_1 +; XTENSA-NEXT: .LBB25_4: # %atomicrmw.end +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_umax_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; 
XTENSA-ATOMIC-NEXT: j .LBB25_2 +; XTENSA-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB25_6 +; XTENSA-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: bgeu a9, a11, .LBB25_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a11, a11 +; XTENSA-ATOMIC-NEXT: .LBB25_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB25_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB25_1 +; XTENSA-ATOMIC-NEXT: .LBB25_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw umax ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_umin_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_umin_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a12, a2, 0 +; XTENSA-NEXT: movi a6, 1 +; XTENSA-NEXT: movi a5, 2 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI26_0 +; XTENSA-NEXT: j .LBB26_2 +; XTENSA-NEXT: .LBB26_1: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i a12, a1, 0 +; XTENSA-NEXT: bnez a10, .LBB26_4 +; XTENSA-NEXT: .LBB26_2: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a12, a1, 0 +; XTENSA-NEXT: bltu a12, a5, .LBB26_1 +; XTENSA-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-NEXT: or a12, a6, a6 +; XTENSA-NEXT: j .LBB26_1 +; XTENSA-NEXT: .LBB26_4: # %atomicrmw.end +; XTENSA-NEXT: or a2, a12, a12 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_umin_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a12, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 2 +; XTENSA-ATOMIC-NEXT: movi a11, 0 +; XTENSA-ATOMIC-NEXT: or a8, a12, a12 +; XTENSA-ATOMIC-NEXT: j .LBB26_2 +; XTENSA-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a13, 1, .LBB26_6 +; XTENSA-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: bltu a12, a10, .LBB26_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: .LBB26_4: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a12, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a13, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a12, .LBB26_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a13, a11, a11 +; 
XTENSA-ATOMIC-NEXT: j .LBB26_1 +; XTENSA-ATOMIC-NEXT: .LBB26_6: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw umin ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_xchg_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a11, 1 +; XTENSA-NEXT: movi a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI27_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_xchg_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0 +; XTENSA-ATOMIC-NEXT: movi a9, 1 +; XTENSA-ATOMIC-NEXT: movi a10, 0 +; XTENSA-ATOMIC-NEXT: j .LBB27_2 +; XTENSA-ATOMIC-NEXT: .LBB27_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a11, a8, a8 +; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB27_4 +; XTENSA-ATOMIC-NEXT: .LBB27_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: wsr a11, scompare1 +; XTENSA-ATOMIC-NEXT: or a8, a9, a9 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a12, a9, a9 +; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB27_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a12, a10, a10 +; XTENSA-ATOMIC-NEXT: j .LBB27_1 +; XTENSA-ATOMIC-NEXT: .LBB27_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw xchg ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define float @rmw32_fadd_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fadd_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a10, a2, 0 +; XTENSA-NEXT: l32r a6, .LCPI28_1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI28_2 +; XTENSA-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a10, a1, 0 +; XTENSA-NEXT: l32r a11, .LCPI28_0 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: or a12, a10, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: l32i a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB28_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_fadd_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI28_1 +; XTENSA-ATOMIC-NEXT: movi a5, 0 +; XTENSA-ATOMIC-NEXT: movi a4, 1 +; XTENSA-ATOMIC-NEXT: j .LBB28_2 +; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a10, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB28_4 +; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI28_0 +; XTENSA-ATOMIC-NEXT: or a10, a7, a7 +; XTENSA-ATOMIC-NEXT: callx8 a6 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0 +; XTENSA-ATOMIC-NEXT: or a8, a4, a4 +; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB28_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; 
XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a5, a5 +; XTENSA-ATOMIC-NEXT: j .LBB28_1 +; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a10, a10 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw fadd ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float @rmw32_fsub_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fsub_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a10, a2, 0 +; XTENSA-NEXT: l32r a6, .LCPI29_1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI29_2 +; XTENSA-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a10, a1, 0 +; XTENSA-NEXT: l32r a11, .LCPI29_0 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: or a12, a10, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: l32i a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB29_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_fsub_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI29_1 +; XTENSA-ATOMIC-NEXT: movi a5, 0 +; XTENSA-ATOMIC-NEXT: movi a4, 1 +; XTENSA-ATOMIC-NEXT: j .LBB29_2 +; XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a10, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB29_4 +; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI29_0 +; XTENSA-ATOMIC-NEXT: or a10, a7, a7 +; XTENSA-ATOMIC-NEXT: callx8 a6 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0 +; XTENSA-ATOMIC-NEXT: or a8, a4, a4 +; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB29_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a5, a5 +; XTENSA-ATOMIC-NEXT: j .LBB29_1 +; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a10, a10 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw fsub ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float @rmw32_fmin_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fmin_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a10, a2, 0 +; XTENSA-NEXT: l32r a6, .LCPI30_1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI30_2 +; XTENSA-NEXT: .LBB30_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a10, a1, 0 +; XTENSA-NEXT: l32r a11, .LCPI30_0 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: or a12, a10, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: l32i a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB30_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_fmin_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI30_1 +; 
XTENSA-ATOMIC-NEXT: movi a5, 0 +; XTENSA-ATOMIC-NEXT: movi a4, 1 +; XTENSA-ATOMIC-NEXT: j .LBB30_2 +; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a10, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB30_4 +; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI30_0 +; XTENSA-ATOMIC-NEXT: or a10, a7, a7 +; XTENSA-ATOMIC-NEXT: callx8 a6 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0 +; XTENSA-ATOMIC-NEXT: or a8, a4, a4 +; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB30_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a5, a5 +; XTENSA-ATOMIC-NEXT: j .LBB30_1 +; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a10, a10 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw fmin ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float @rmw32_fmax_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fmax_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: l32i a10, a2, 0 +; XTENSA-NEXT: l32r a6, .LCPI31_1 +; XTENSA-NEXT: movi a7, 5 +; XTENSA-NEXT: l32r a5, .LCPI31_2 +; XTENSA-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i a10, a1, 0 +; XTENSA-NEXT: l32r a11, .LCPI31_0 +; XTENSA-NEXT: callx8 a6 +; XTENSA-NEXT: or a12, a10, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: or a13, a7, a7 +; XTENSA-NEXT: or a14, a7, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: l32i a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB31_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: or a2, a10, a10 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: rmw32_fmax_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI31_1 +; XTENSA-ATOMIC-NEXT: movi a5, 0 +; XTENSA-ATOMIC-NEXT: movi a4, 1 +; XTENSA-ATOMIC-NEXT: j .LBB31_2 +; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a7, a10, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB31_4 +; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI31_0 +; XTENSA-ATOMIC-NEXT: or a10, a7, a7 +; XTENSA-ATOMIC-NEXT: callx8 a6 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0 +; XTENSA-ATOMIC-NEXT: or a8, a4, a4 +; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB31_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: or a8, a5, a5 +; XTENSA-ATOMIC-NEXT: j .LBB31_1 +; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a10, a10 +; XTENSA-ATOMIC-NEXT: retw + %v = atomicrmw fmax ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define i32 @cmpxchg32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg32_monotonic: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a13, 0 +; XTENSA-NEXT: s32i a13, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: l32r a8, .LCPI32_0 +; 
XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg32_monotonic: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi a8, 1 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: wsr a9, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic + %res.0 = extractvalue { i32, i1 } %res, 0 + ret i32 %res.0 +} + +define i32 @cmpxchg32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg32_seq_cst: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 48 +; XTENSA-NEXT: or a10, a2, a2 +; XTENSA-NEXT: movi a8, 0 +; XTENSA-NEXT: s32i a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi a12, 1 +; XTENSA-NEXT: movi a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI33_0 +; XTENSA-NEXT: or a14, a13, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l32i a2, a1, 0 +; XTENSA-NEXT: retw +; +; XTENSA-ATOMIC-LABEL: cmpxchg32_seq_cst: +; XTENSA-ATOMIC: # %bb.0: +; XTENSA-ATOMIC-NEXT: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi a8, 1 +; XTENSA-ATOMIC-NEXT: movi a9, 0 +; XTENSA-ATOMIC-NEXT: wsr a9, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: or a2, a8, a8 +; XTENSA-ATOMIC-NEXT: retw + %res = cmpxchg ptr %p, i32 0, i32 1 seq_cst seq_cst + %res.0 = extractvalue { i32, i1 } %res, 0 + ret i32 %res.0 +} |
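+
+; Lowering summary for the checks above (a hedged reading of the generated
+; output; the libcall and feature-flag names referred to below are assumptions,
+; not taken from the RUN lines). Under the XTENSA-ATOMIC configuration, each
+; atomicrmw and cmpxchg above is expanded inline to a compare-and-swap loop:
+; the expected value is written to the SCOMPARE1 special register with wsr, the
+; store is retried with s32c1i until it succeeds, and seq_cst orderings are
+; bracketed with memw barriers. Under the plain XTENSA configuration the same
+; operations are expanded to runtime calls instead: the callee address is
+; loaded from a constant pool entry with l32r and invoked through callx8, and
+; the constant 5 materialized into the argument registers presumably encodes
+; the seq_cst ordering value of the __atomic_* libcall ABI.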