Diffstat (limited to 'llvm/test/CodeGen/RISCV')
-rw-r--r--  llvm/test/CodeGen/RISCV/cmov-branch-opt.ll     5
-rw-r--r--  llvm/test/CodeGen/RISCV/features-info.ll       1
-rw-r--r--  llvm/test/CodeGen/RISCV/half-arith.ll        300
-rw-r--r--  llvm/test/CodeGen/RISCV/xqcicli.ll            36
-rw-r--r--  llvm/test/CodeGen/RISCV/xqcics.ll             42
5 files changed, 154 insertions, 230 deletions
diff --git a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
index edec1d0..1957019 100644
--- a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
@@ -201,8 +201,9 @@ define signext i32 @test4(i32 signext %x, i32 signext %y, i32 signext %z) {
;
; RV32IXQCI-LABEL: test4:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: li a0, 0
-; RV32IXQCI-NEXT: qc.lieqi a0, a2, 0, 3
+; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: li a1, 3
+; RV32IXQCI-NEXT: qc.selectieqi a0, 0, a1, 0
; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = select i1 %c, i32 3, i32 0
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 1a7a72d..693a40d 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -142,6 +142,7 @@
; CHECK-NEXT: shvstvecd - 'Shvstvecd' (vstvec supports Direct mode).
; CHECK-NEXT: shxadd-load-fusion - Enable SH(1|2|3)ADD(.UW) + load macrofusion.
; CHECK-NEXT: sifive7 - SiFive 7-Series processors.
+; CHECK-NEXT: single-element-vec-fp64 - Certain vector FP64 operations produce a single result element per cycle.
; CHECK-NEXT: smaia - 'Smaia' (Advanced Interrupt Architecture Machine Level).
; CHECK-NEXT: smcdeleg - 'Smcdeleg' (Counter Delegation Machine Level).
; CHECK-NEXT: smcntrpmf - 'Smcntrpmf' (Cycle and Instret Privilege Mode Filtering).
diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 2ebb6e9..d089e36 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -514,6 +514,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s1, a1, -1
; RV32I-NEXT: and a0, a0, s1
@@ -521,13 +522,12 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, a0, a1
; RV32I-NEXT: and a0, a0, s1
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lui a0, 524288
-; RV32I-NEXT: xor a0, s0, a0
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: and a0, s2, s1
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s0
@@ -536,6 +536,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
@@ -545,6 +546,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addi s1, a1, -1
; RV64I-NEXT: and a0, a0, s1
@@ -552,13 +554,12 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, a0, a1
; RV64I-NEXT: and a0, a0, s1
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: lui a0, 524288
-; RV64I-NEXT: xor a0, s0, a0
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s1
+; RV64I-NEXT: and a0, s2, s1
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s0
@@ -567,6 +568,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
@@ -638,11 +640,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: not a0, a0
; RV32I-NEXT: lui a1, 1048568
; RV32I-NEXT: slli s1, s1, 17
; RV32I-NEXT: and a0, a0, a1
@@ -677,11 +675,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: not a0, a0
; RV64I-NEXT: lui a1, 1048568
; RV64I-NEXT: slli s1, s1, 49
; RV64I-NEXT: and a0, a0, a1
@@ -804,15 +798,14 @@ define half @fabs_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: slli s0, a0, 17
+; RV32I-NEXT: srli s0, s0, 17
; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: slli a0, a0, 1
-; RV32I-NEXT: srli a0, a0, 1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv a1, s0
+; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -841,15 +834,14 @@ define half @fabs_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: slli s0, a0, 49
+; RV64I-NEXT: srli s0, s0, 49
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: slli a0, a0, 33
-; RV64I-NEXT: srli a0, a0, 33
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv a1, s0
+; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1217,25 +1209,21 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 16
-; RV32I-NEXT: addi s3, a0, -1
-; RV32I-NEXT: and a0, a2, s3
+; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: and a0, a2, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s1
@@ -1261,25 +1249,21 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 16
-; RV64I-NEXT: addi s3, a0, -1
-; RV64I-NEXT: and a0, a2, s3
+; RV64I-NEXT: addi s2, a0, -1
+; RV64I-NEXT: and a0, a2, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s1
@@ -1355,43 +1339,34 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s1, a2
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: lui s3, 16
-; RV32I-NEXT: addi s3, s3, -1
+; RV32I-NEXT: mv s0, a2
+; RV32I-NEXT: mv s1, a1
+; RV32I-NEXT: lui a1, 16
+; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, s2, a1
+; RV32I-NEXT: xor s4, a0, a1
; RV32I-NEXT: and a0, s1, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: and a0, s4, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: mv a0, s2
+; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: call fmaf
; RV32I-NEXT: call __truncsfhf2
@@ -1413,43 +1388,34 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s1, a2
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: lui s3, 16
-; RV64I-NEXT: addi s3, s3, -1
+; RV64I-NEXT: mv s0, a2
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: lui a1, 16
+; RV64I-NEXT: addi s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui s4, 524288
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, s2, a1
+; RV64I-NEXT: xor s4, a0, a1
; RV64I-NEXT: and a0, s1, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: and a0, s4, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
-; RV64I-NEXT: mv a0, s2
+; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call fmaf
; RV64I-NEXT: call __truncsfhf2
@@ -1535,44 +1501,35 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s1, a2
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lui s3, 16
-; RV32I-NEXT: addi s3, s3, -1
+; RV32I-NEXT: mv s0, a2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: lui a0, 16
+; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: and a0, a1, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, s2, a1
+; RV32I-NEXT: xor s4, a0, a1
; RV32I-NEXT: and a0, s1, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: and a0, s4, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: mv a1, s2
+; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call fmaf
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -1593,44 +1550,35 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s1, a2
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: lui s3, 16
-; RV64I-NEXT: addi s3, s3, -1
+; RV64I-NEXT: mv s0, a2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: lui a0, 16
+; RV64I-NEXT: addi s3, a0, -1
; RV64I-NEXT: and a0, a1, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui s4, 524288
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, s2, a1
+; RV64I-NEXT: xor s4, a0, a1
; RV64I-NEXT: and a0, s1, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: and a0, s4, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: mv a1, s2
+; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call fmaf
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
@@ -1960,25 +1908,21 @@ define half @fnmsub_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: lui a1, 16
-; RV32I-NEXT: addi s3, a1, -1
-; RV32I-NEXT: and a0, a0, s3
+; RV32I-NEXT: addi s2, a1, -1
+; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s0
@@ -2003,25 +1947,21 @@ define half @fnmsub_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addi s3, a1, -1
-; RV64I-NEXT: and a0, a0, s3
+; RV64I-NEXT: addi s2, a1, -1
+; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
@@ -2096,25 +2036,21 @@ define half @fnmsub_h_2(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 16
-; RV32I-NEXT: addi s3, a0, -1
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: and a0, a1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
@@ -2140,25 +2076,21 @@ define half @fnmsub_h_2(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 16
-; RV64I-NEXT: addi s3, a0, -1
-; RV64I-NEXT: and a0, a1, s3
+; RV64I-NEXT: addi s2, a0, -1
+; RV64I-NEXT: and a0, a1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
@@ -2519,12 +2451,8 @@ define half @fnmadd_h_contract(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s1, a0, a1
; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
@@ -2580,12 +2508,8 @@ define half @fnmadd_h_contract(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s1, a0, a1
; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
diff --git a/llvm/test/CodeGen/RISCV/xqcicli.ll b/llvm/test/CodeGen/RISCV/xqcicli.ll
index 8d4caa1..cdb1947 100644
--- a/llvm/test/CodeGen/RISCV/xqcicli.ll
+++ b/llvm/test/CodeGen/RISCV/xqcicli.ll
@@ -23,7 +23,8 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eq:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieq a0, a1, a2, 11
+; RV32IXQCI-NEXT: qc.selectine a1, a2, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %b, %x
@@ -47,7 +48,8 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_ne:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.line a0, a1, a2, 11
+; RV32IXQCI-NEXT: qc.selectieq a1, a2, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %b, %x
@@ -167,7 +169,8 @@ define i32 @select_cc_example_eq_c(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eq_c:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.line a0, a1, a2, 11
+; RV32IXQCI-NEXT: qc.selectieq a1, a2, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %b, %x
@@ -191,7 +194,8 @@ define i32 @select_cc_example_ne_c(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_ne_c:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieq a0, a1, a2, 11
+; RV32IXQCI-NEXT: qc.selectine a1, a2, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %b, %x
@@ -312,7 +316,8 @@ define i32 @select_cc_example_eqi(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eqi:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieqi a0, a1, 12, 11
+; RV32IXQCI-NEXT: qc.selectinei a1, 12, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %b, 12
@@ -337,7 +342,8 @@ define i32 @select_cc_example_nei(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_nei:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.linei a0, a1, 12, 11
+; RV32IXQCI-NEXT: qc.selectieqi a1, 12, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %b, 12
@@ -462,7 +468,8 @@ define i32 @select_cc_example_eqi_c1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eqi_c1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieqi a0, a1, 12, 11
+; RV32IXQCI-NEXT: qc.selectinei a1, 12, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 12, %b
@@ -487,7 +494,8 @@ define i32 @select_cc_example_nei_c1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_nei_c1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.linei a0, a1, 12, 11
+; RV32IXQCI-NEXT: qc.selectieqi a1, 12, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 12, %b
@@ -612,7 +620,8 @@ define i32 @select_cc_example_eqi_c2(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eqi_c2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.linei a0, a1, 12, 11
+; RV32IXQCI-NEXT: qc.selectieqi a1, 12, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 12, %b
@@ -637,7 +646,8 @@ define i32 @select_cc_example_nei_c2(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_nei_c2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieqi a0, a1, 12, 11
+; RV32IXQCI-NEXT: qc.selectinei a1, 12, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 12, %b
@@ -762,7 +772,8 @@ define i32 @select_cc_example_eqi_c3(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eqi_c3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.linei a0, a1, 12, 11
+; RV32IXQCI-NEXT: qc.selectieqi a1, 12, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %b, 12
@@ -787,7 +798,8 @@ define i32 @select_cc_example_nei_c3(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_nei_c3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieqi a0, a1, 12, 11
+; RV32IXQCI-NEXT: qc.selectinei a1, 12, a0, 11
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %b, 12
diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll
index c0839c9..7656a0c 100644
--- a/llvm/test/CodeGen/RISCV/xqcics.ll
+++ b/llvm/test/CodeGen/RISCV/xqcics.ll
@@ -270,8 +270,7 @@ define i32 @select_cc_example_eqi(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eqi:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.line a2, a0, a1, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectieq a0, a1, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %a, %b
@@ -301,8 +300,7 @@ define i32 @select_cc_example_eqi_c(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eqi_c:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieq a2, a0, a1, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectine a0, a1, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %a, %b
@@ -332,8 +330,7 @@ define i32 @select_cc_example_nei(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_nei:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieq a2, a0, a1, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectine a0, a1, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %a, %b
@@ -363,8 +360,7 @@ define i32 @select_cc_example_nei_c(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_nei_c:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.line a2, a0, a1, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectieq a0, a1, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %a, %b
@@ -395,8 +391,7 @@ define i32 @select_cc_example_ieqi(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_ieqi:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.linei a2, a0, 12, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectieqi a0, 12, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 12
@@ -427,8 +422,7 @@ define i32 @select_cc_example_ieqi_c1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_ieqi_c1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.linei a2, a0, 12, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectieqi a0, 12, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 12, %a
@@ -459,8 +453,7 @@ define i32 @select_cc_example_ieqi_c2(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_ieqi_c2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieqi a2, a0, 12, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectinei a0, 12, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 12
@@ -491,8 +484,7 @@ define i32 @select_cc_example_ieqi_c3(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_ieqi_c3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieqi a2, a0, 12, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectinei a0, 12, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 12, %a
@@ -523,8 +515,7 @@ define i32 @select_cc_example_inei(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_inei:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieqi a2, a0, 12, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectinei a0, 12, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %a, 12
@@ -555,8 +546,7 @@ define i32 @select_cc_example_inei_c1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_inei_c1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieqi a2, a0, 12, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectinei a0, 12, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 12, %a
@@ -587,8 +577,7 @@ define i32 @select_cc_example_inei_c2(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_inei_c2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.linei a2, a0, 12, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectieqi a0, 12, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %a, 12
@@ -619,8 +608,7 @@ define i32 @select_cc_example_inei_c3(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_inei_c3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.linei a2, a0, 12, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectieqi a0, 12, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 12, %a
@@ -712,8 +700,7 @@ define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_eq1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.line a2, a1, a0, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectieq a0, a1, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %b, %a
@@ -743,8 +730,7 @@ define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCI-LABEL: select_cc_example_ne1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.lieq a2, a1, a0, 11
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.selectine a0, a1, a2, 11
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %b, %a