diff options
author | Craig Topper <craig.topper@sifive.com> | 2024-02-21 23:48:19 -0800 |
---|---|---|
committer | Craig Topper <craig.topper@sifive.com> | 2024-02-22 08:38:42 -0800 |
commit | c9afd1ad783a67210bed4fd2f7108477fc986e15 (patch) | |
tree | 6ed51f12e2415cee7b9ee398aec9878cff45dad5 /llvm | |
parent | f67ef1a8d9841718ce08a69d935ac8fd8e6112f9 (diff) | |
download | llvm-c9afd1ad783a67210bed4fd2f7108477fc986e15.zip llvm-c9afd1ad783a67210bed4fd2f7108477fc986e15.tar.gz llvm-c9afd1ad783a67210bed4fd2f7108477fc986e15.tar.bz2 |
[RISCV] Add test case showing missed opportunity to form sextload when sext and zext nneg are both present. NFC
Diffstat (limited to 'llvm')
-rw-r--r-- | llvm/test/CodeGen/RISCV/sext-zext-trunc.ll | 84 |
1 file changed, 84 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll index a2a953c..09516d9 100644 --- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll +++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll @@ -871,3 +871,87 @@ define void @zext_nneg_dominating_icmp_i32_zeroext(i16 signext %0) { 5: ret void } + +; The load is used extended and non-extended in the successor basic block. The +; signed compare will cause the non-extended value to be exported out of the first +; basic block using a sext to XLen. We need to CSE the zext nneg with the sext +; so that we can form a sextload. +define void @load_zext_nneg_sext_cse(ptr %p) nounwind { +; RV32I-LABEL: load_zext_nneg_sext_cse: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: lhu s0, 0(a0) +; RV32I-NEXT: slli a0, s0, 16 +; RV32I-NEXT: bltz a0, .LBB50_2 +; RV32I-NEXT: # %bb.1: # %bb1 +; RV32I-NEXT: srai a0, a0, 16 +; RV32I-NEXT: call bar_i16 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: tail bar_i32 +; RV32I-NEXT: .LBB50_2: # %bb2 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: load_zext_nneg_sext_cse: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: lhu s0, 0(a0) +; RV64I-NEXT: slli a0, s0, 48 +; RV64I-NEXT: bltz a0, .LBB50_2 +; RV64I-NEXT: # %bb.1: # %bb1 +; RV64I-NEXT: srai a0, a0, 48 +; RV64I-NEXT: call bar_i16 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: tail bar_i32 +; RV64I-NEXT: 
.LBB50_2: # %bb2 +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: load_zext_nneg_sext_cse: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: addi sp, sp, -16 +; RV64ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64ZBB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; RV64ZBB-NEXT: lhu s0, 0(a0) +; RV64ZBB-NEXT: sext.h a0, s0 +; RV64ZBB-NEXT: bltz a0, .LBB50_2 +; RV64ZBB-NEXT: # %bb.1: # %bb1 +; RV64ZBB-NEXT: call bar_i16 +; RV64ZBB-NEXT: mv a0, s0 +; RV64ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: addi sp, sp, 16 +; RV64ZBB-NEXT: tail bar_i32 +; RV64ZBB-NEXT: .LBB50_2: # %bb2 +; RV64ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64ZBB-NEXT: addi sp, sp, 16 +; RV64ZBB-NEXT: ret + %load = load i16, ptr %p + %zext = zext nneg i16 %load to i32 + %cmp = icmp sgt i16 %load, -1 + br i1 %cmp, label %bb1, label %bb2 + +bb1: + tail call void @bar_i16(i16 signext %load) + tail call void @bar_i32(i32 signext %zext) + br label %bb2 + +bb2: + ret void +} +declare void @bar_i16(i16); |