; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 ; RUN: llc -mtriple=riscv32 -target-abi=ilp32f -mattr=+f,+zbs < %s | FileCheck %s -check-prefix=RV32I ; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs < %s | FileCheck %s -check-prefix=RV64I ; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+xventanacondops < %s | FileCheck %s -check-prefix=RV64XVENTANACONDOPS ; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+xtheadcondmov < %s | FileCheck %s -check-prefix=RV64XTHEADCONDMOV ; RUN: llc -mtriple=riscv32 -target-abi=ilp32f -mattr=+f,+zbs,+experimental-zicond < %s | FileCheck %s -check-prefix=RV32ZICOND ; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+experimental-zicond < %s | FileCheck %s -check-prefix=RV64ZICOND define i64 @zero1(i64 %rs1, i1 zeroext %rc) { ; RV32I-LABEL: zero1: ; RV32I: # %bb.0: ; RV32I-NEXT: neg a2, a2 ; RV32I-NEXT: and a0, a2, a0 ; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1: ; RV64I: # %bb.0: ; RV64I-NEXT: neg a1, a1 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero1: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a0, zero, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.eqz a0, a0, a2 ; RV32ZICOND-NEXT: czero.eqz a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a0, a1 ; RV64ZICOND-NEXT: ret %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2(i64 %rs1, i1 zeroext %rc) { ; RV32I-LABEL: zero2: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a2, a2, -1 ; RV32I-NEXT: and a0, a2, a0 ; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, a1, -1 
; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero2: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, zero, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a0, a0, a2 ; RV32ZICOND-NEXT: czero.nez a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero2: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a0, a1 ; RV64ZICOND-NEXT: ret %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @zero_singlebit1(i64 %rs1, i64 %rs2) { ; RV32I-LABEL: zero_singlebit1: ; RV32I: # %bb.0: ; RV32I-NEXT: bexti a2, a2, 12 ; RV32I-NEXT: addi a2, a2, -1 ; RV32I-NEXT: and a0, a2, a0 ; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero_singlebit1: ; RV64I: # %bb.0: ; RV64I-NEXT: bexti a1, a1, 12 ; RV64I-NEXT: addi a1, a1, -1 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero_singlebit1: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: bexti a1, a1, 12 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero_singlebit1: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: lui a2, 1 ; RV64XTHEADCONDMOV-NEXT: and a1, a1, a2 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, zero, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero_singlebit1: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: bexti a2, a2, 12 ; RV32ZICOND-NEXT: czero.nez a0, a0, a2 ; RV32ZICOND-NEXT: czero.nez a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero_singlebit1: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: bexti a1, a1, 12 ; RV64ZICOND-NEXT: czero.nez a0, a0, a1 ; RV64ZICOND-NEXT: ret %and = and i64 %rs2, 4096 %rc = icmp eq i64 %and, 0 %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero_singlebit2(i64 
%rs1, i64 %rs2) { ; RV32I-LABEL: zero_singlebit2: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a2, a2, 19 ; RV32I-NEXT: srai a2, a2, 31 ; RV32I-NEXT: and a0, a2, a0 ; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero_singlebit2: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a1, a1, 51 ; RV64I-NEXT: srai a1, a1, 63 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero_singlebit2: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: bexti a1, a1, 12 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero_singlebit2: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: slli a1, a1, 51 ; RV64XTHEADCONDMOV-NEXT: srai a1, a1, 63 ; RV64XTHEADCONDMOV-NEXT: and a0, a1, a0 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero_singlebit2: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: bexti a2, a2, 12 ; RV32ZICOND-NEXT: czero.eqz a0, a0, a2 ; RV32ZICOND-NEXT: czero.eqz a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero_singlebit2: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: bexti a1, a1, 12 ; RV64ZICOND-NEXT: czero.eqz a0, a0, a1 ; RV64ZICOND-NEXT: ret %and = and i64 %rs2, 4096 %rc = icmp eq i64 %and, 0 %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @add1(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: add1: ; RV32I: # %bb.0: ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: and a4, a0, a4 ; RV32I-NEXT: add a2, a2, a4 ; RV32I-NEXT: and a0, a0, a3 ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: sltu a1, a0, a1 ; RV32I-NEXT: add a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: add1: ; RV64I: # %bb.0: ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: add1: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: add a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: add1: ; RV64XTHEADCONDMOV: # %bb.0: ; 
RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: add a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: add1: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.eqz a4, a4, a0 ; RV32ZICOND-NEXT: add a2, a2, a4 ; RV32ZICOND-NEXT: czero.eqz a0, a3, a0 ; RV32ZICOND-NEXT: add a0, a1, a0 ; RV32ZICOND-NEXT: sltu a1, a0, a1 ; RV32ZICOND-NEXT: add a1, a2, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: add1: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: add a0, a1, a0 ; RV64ZICOND-NEXT: ret %add = add i64 %rs1, %rs2 %sel = select i1 %rc, i64 %add, i64 %rs1 ret i64 %sel } define i64 @add2(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: add2: ; RV32I: # %bb.0: ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: and a2, a0, a2 ; RV32I-NEXT: add a2, a4, a2 ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: add a0, a3, a0 ; RV32I-NEXT: sltu a1, a0, a3 ; RV32I-NEXT: add a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: add2: ; RV64I: # %bb.0: ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: add2: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: add a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: add2: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: add a0, a2, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: add2: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.eqz a2, a2, a0 ; RV32ZICOND-NEXT: add a2, a4, a2 ; RV32ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV32ZICOND-NEXT: add a0, a3, a0 ; RV32ZICOND-NEXT: sltu a1, a0, a3 ; RV32ZICOND-NEXT: add a1, a2, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: add2: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: add a0, a2, a0 ; RV64ZICOND-NEXT: ret %add = add i64 %rs1, %rs2 %sel = select i1 %rc, i64 %add, i64 %rs2 ret i64 %sel } define i64 
@add3(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: add3: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, a0, -1 ; RV32I-NEXT: and a4, a0, a4 ; RV32I-NEXT: add a2, a2, a4 ; RV32I-NEXT: and a0, a0, a3 ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: sltu a1, a0, a1 ; RV32I-NEXT: add a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: add3: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: add3: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: add a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: add3: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: add a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: add3: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a4, a4, a0 ; RV32ZICOND-NEXT: add a2, a2, a4 ; RV32ZICOND-NEXT: czero.nez a0, a3, a0 ; RV32ZICOND-NEXT: add a0, a1, a0 ; RV32ZICOND-NEXT: sltu a1, a0, a1 ; RV32ZICOND-NEXT: add a1, a2, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: add3: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: add a0, a1, a0 ; RV64ZICOND-NEXT: ret %add = add i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs1, i64 %add ret i64 %sel } define i64 @add4(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: add4: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, a0, -1 ; RV32I-NEXT: and a2, a0, a2 ; RV32I-NEXT: add a2, a4, a2 ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: add a0, a3, a0 ; RV32I-NEXT: sltu a1, a0, a3 ; RV32I-NEXT: add a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: add4: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: add4: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: add a0, a2, a0 ; 
RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: add4: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: add a0, a2, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: add4: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a2, a2, a0 ; RV32ZICOND-NEXT: add a2, a4, a2 ; RV32ZICOND-NEXT: czero.nez a0, a1, a0 ; RV32ZICOND-NEXT: add a0, a3, a0 ; RV32ZICOND-NEXT: sltu a1, a0, a3 ; RV32ZICOND-NEXT: add a1, a2, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: add4: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: add a0, a2, a0 ; RV64ZICOND-NEXT: ret %add = add i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs2, i64 %add ret i64 %sel } define i64 @sub1(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: sub1: ; RV32I: # %bb.0: ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: and a3, a0, a3 ; RV32I-NEXT: sltu a5, a1, a3 ; RV32I-NEXT: and a0, a0, a4 ; RV32I-NEXT: sub a2, a2, a0 ; RV32I-NEXT: sub a2, a2, a5 ; RV32I-NEXT: sub a0, a1, a3 ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sub1: ; RV64I: # %bb.0: ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: sub1: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: sub a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: sub1: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: sub a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: sub1: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.eqz a3, a3, a0 ; RV32ZICOND-NEXT: sltu a5, a1, a3 ; RV32ZICOND-NEXT: czero.eqz a0, a4, a0 ; RV32ZICOND-NEXT: sub a2, a2, a0 ; RV32ZICOND-NEXT: sub a2, a2, a5 ; RV32ZICOND-NEXT: sub a0, a1, a3 ; RV32ZICOND-NEXT: mv a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: sub1: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 
; RV64ZICOND-NEXT: sub a0, a1, a0 ; RV64ZICOND-NEXT: ret %sub = sub i64 %rs1, %rs2 %sel = select i1 %rc, i64 %sub, i64 %rs1 ret i64 %sel } define i64 @sub2(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: sub2: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, a0, -1 ; RV32I-NEXT: and a3, a0, a3 ; RV32I-NEXT: sltu a5, a1, a3 ; RV32I-NEXT: and a0, a0, a4 ; RV32I-NEXT: sub a2, a2, a0 ; RV32I-NEXT: sub a2, a2, a5 ; RV32I-NEXT: sub a0, a1, a3 ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sub2: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: sub2: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: sub a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: sub2: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: sub a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: sub2: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a3, a3, a0 ; RV32ZICOND-NEXT: sltu a5, a1, a3 ; RV32ZICOND-NEXT: czero.nez a0, a4, a0 ; RV32ZICOND-NEXT: sub a2, a2, a0 ; RV32ZICOND-NEXT: sub a2, a2, a5 ; RV32ZICOND-NEXT: sub a0, a1, a3 ; RV32ZICOND-NEXT: mv a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: sub2: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: sub a0, a1, a0 ; RV64ZICOND-NEXT: ret %sub = sub i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs1, i64 %sub ret i64 %sel } define i64 @or1(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: or1: ; RV32I: # %bb.0: ; RV32I-NEXT: neg a5, a0 ; RV32I-NEXT: and a0, a5, a3 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: and a1, a5, a4 ; RV32I-NEXT: or a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or1: ; RV64I: # %bb.0: ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: or1: ; 
RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: or1: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: or a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: or1: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.eqz a3, a3, a0 ; RV32ZICOND-NEXT: or a3, a1, a3 ; RV32ZICOND-NEXT: czero.eqz a1, a4, a0 ; RV32ZICOND-NEXT: or a1, a2, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: or1: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a1, a0 ; RV64ZICOND-NEXT: ret %or = or i64 %rs1, %rs2 %sel = select i1 %rc, i64 %or, i64 %rs1 ret i64 %sel } define i64 @or2(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: or2: ; RV32I: # %bb.0: ; RV32I-NEXT: neg a5, a0 ; RV32I-NEXT: and a0, a5, a1 ; RV32I-NEXT: or a0, a3, a0 ; RV32I-NEXT: and a1, a5, a2 ; RV32I-NEXT: or a1, a4, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or2: ; RV64I: # %bb.0: ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: or2: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: or2: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: or a0, a2, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: or2: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.eqz a1, a1, a0 ; RV32ZICOND-NEXT: or a3, a3, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a2, a0 ; RV32ZICOND-NEXT: or a1, a4, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: or2: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a2, a0 ; RV64ZICOND-NEXT: ret %or = or i64 %rs1, %rs2 %sel = select 
i1 %rc, i64 %or, i64 %rs2 ret i64 %sel } define i64 @or3(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: or3: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a5, a0, -1 ; RV32I-NEXT: and a0, a5, a3 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: and a1, a5, a4 ; RV32I-NEXT: or a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or3: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: or3: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: or3: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: or a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: or3: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a3, a3, a0 ; RV32ZICOND-NEXT: or a3, a1, a3 ; RV32ZICOND-NEXT: czero.nez a1, a4, a0 ; RV32ZICOND-NEXT: or a1, a2, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: or3: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a1, a0 ; RV64ZICOND-NEXT: ret %or = or i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs1, i64 %or ret i64 %sel } define i64 @or4(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: or4: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a5, a0, -1 ; RV32I-NEXT: and a0, a5, a1 ; RV32I-NEXT: or a0, a3, a0 ; RV32I-NEXT: and a1, a5, a2 ; RV32I-NEXT: or a1, a4, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or4: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: or4: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: or4: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; 
RV64XTHEADCONDMOV-NEXT: or a0, a2, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: or4: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a1, a1, a0 ; RV32ZICOND-NEXT: or a3, a3, a1 ; RV32ZICOND-NEXT: czero.nez a1, a2, a0 ; RV32ZICOND-NEXT: or a1, a4, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: or4: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a2, a0 ; RV64ZICOND-NEXT: ret %or = or i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs2, i64 %or ret i64 %sel } define i64 @xor1(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: xor1: ; RV32I: # %bb.0: ; RV32I-NEXT: neg a5, a0 ; RV32I-NEXT: and a0, a5, a3 ; RV32I-NEXT: xor a0, a1, a0 ; RV32I-NEXT: and a1, a5, a4 ; RV32I-NEXT: xor a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: xor1: ; RV64I: # %bb.0: ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: xor a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: xor1: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: xor a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: xor1: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: xor a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: xor1: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.eqz a3, a3, a0 ; RV32ZICOND-NEXT: xor a3, a1, a3 ; RV32ZICOND-NEXT: czero.eqz a1, a4, a0 ; RV32ZICOND-NEXT: xor a1, a2, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: xor1: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: xor a0, a1, a0 ; RV64ZICOND-NEXT: ret %xor = xor i64 %rs1, %rs2 %sel = select i1 %rc, i64 %xor, i64 %rs1 ret i64 %sel } define i64 @xor2(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: xor2: ; RV32I: # %bb.0: ; RV32I-NEXT: neg a5, a0 ; RV32I-NEXT: and a0, a5, a1 ; RV32I-NEXT: xor a0, a3, a0 ; RV32I-NEXT: and a1, a5, a2 
; RV32I-NEXT: xor a1, a4, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: xor2: ; RV64I: # %bb.0: ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: xor a0, a2, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: xor2: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: xor a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: xor2: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: xor a0, a2, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: xor2: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.eqz a1, a1, a0 ; RV32ZICOND-NEXT: xor a3, a3, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a2, a0 ; RV32ZICOND-NEXT: xor a1, a4, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: xor2: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: xor a0, a2, a0 ; RV64ZICOND-NEXT: ret %xor = xor i64 %rs1, %rs2 %sel = select i1 %rc, i64 %xor, i64 %rs2 ret i64 %sel } define i64 @xor3(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: xor3: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a5, a0, -1 ; RV32I-NEXT: and a0, a5, a3 ; RV32I-NEXT: xor a0, a1, a0 ; RV32I-NEXT: and a1, a5, a4 ; RV32I-NEXT: xor a1, a2, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: xor3: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: xor a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: xor3: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: xor a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: xor3: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: xor a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: xor3: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a3, a3, a0 ; RV32ZICOND-NEXT: xor a3, a1, a3 ; RV32ZICOND-NEXT: czero.nez a1, a4, a0 ; 
RV32ZICOND-NEXT: xor a1, a2, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: xor3: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: xor a0, a1, a0 ; RV64ZICOND-NEXT: ret %xor = xor i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs1, i64 %xor ret i64 %sel } define i64 @xor4(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: xor4: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a5, a0, -1 ; RV32I-NEXT: and a0, a5, a1 ; RV32I-NEXT: xor a0, a3, a0 ; RV32I-NEXT: and a1, a5, a2 ; RV32I-NEXT: xor a1, a4, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: xor4: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: xor a0, a2, a0 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: xor4: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: xor a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: xor4: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: xor a0, a2, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: xor4: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a1, a1, a0 ; RV32ZICOND-NEXT: xor a3, a3, a1 ; RV32ZICOND-NEXT: czero.nez a1, a2, a0 ; RV32ZICOND-NEXT: xor a1, a4, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: xor4: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: xor a0, a2, a0 ; RV64ZICOND-NEXT: ret %xor = xor i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs2, i64 %xor ret i64 %sel } define i64 @and1(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: and1: ; RV32I: # %bb.0: ; RV32I-NEXT: beqz a0, .LBB18_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: and a2, a2, a4 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: .LBB18_2: ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and1: ; RV64I: # %bb.0: ; RV64I-NEXT: beqz a0, .LBB18_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: and a1, a1, a2 
; RV64I-NEXT: .LBB18_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: and1: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: and a2, a1, a2 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: and1: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: and a2, a1, a2 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a1, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: and1: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: and a4, a2, a4 ; RV32ZICOND-NEXT: and a3, a1, a3 ; RV32ZICOND-NEXT: czero.nez a1, a1, a0 ; RV32ZICOND-NEXT: or a3, a3, a1 ; RV32ZICOND-NEXT: czero.nez a1, a2, a0 ; RV32ZICOND-NEXT: or a1, a4, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: and1: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: and a2, a1, a2 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a2, a0 ; RV64ZICOND-NEXT: ret %and = and i64 %rs1, %rs2 %sel = select i1 %rc, i64 %and, i64 %rs1 ret i64 %sel } define i64 @and2(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: and2: ; RV32I: # %bb.0: ; RV32I-NEXT: beqz a0, .LBB19_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: and a4, a2, a4 ; RV32I-NEXT: and a3, a1, a3 ; RV32I-NEXT: .LBB19_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and2: ; RV64I: # %bb.0: ; RV64I-NEXT: beqz a0, .LBB19_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: and a2, a1, a2 ; RV64I-NEXT: .LBB19_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: and2: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: and a1, a1, a2 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: and2: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: and a1, a1, a2 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: 
mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: and2: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: and a5, a2, a4 ; RV32ZICOND-NEXT: and a1, a1, a3 ; RV32ZICOND-NEXT: czero.nez a2, a3, a0 ; RV32ZICOND-NEXT: or a2, a1, a2 ; RV32ZICOND-NEXT: czero.nez a1, a4, a0 ; RV32ZICOND-NEXT: or a1, a5, a1 ; RV32ZICOND-NEXT: mv a0, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: and2: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: and a1, a1, a2 ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a1, a0 ; RV64ZICOND-NEXT: ret %and = and i64 %rs1, %rs2 %sel = select i1 %rc, i64 %and, i64 %rs2 ret i64 %sel } define i64 @and3(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: and3: ; RV32I: # %bb.0: ; RV32I-NEXT: bnez a0, .LBB20_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: and a2, a2, a4 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: .LBB20_2: ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and3: ; RV64I: # %bb.0: ; RV64I-NEXT: bnez a0, .LBB20_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: .LBB20_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: and3: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: and a2, a1, a2 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: and3: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: and a2, a1, a2 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a1, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: and3: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: and a4, a2, a4 ; RV32ZICOND-NEXT: and a3, a1, a3 ; RV32ZICOND-NEXT: czero.eqz a1, a1, a0 ; RV32ZICOND-NEXT: or a3, a3, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a2, a0 ; RV32ZICOND-NEXT: or a1, a4, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: and3: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: and a2, a1, a2 ; RV64ZICOND-NEXT: czero.eqz a0, a1, 
a0 ; RV64ZICOND-NEXT: or a0, a2, a0 ; RV64ZICOND-NEXT: ret %and = and i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs1, i64 %and ret i64 %sel } define i64 @and4(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: and4: ; RV32I: # %bb.0: ; RV32I-NEXT: bnez a0, .LBB21_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: and a4, a2, a4 ; RV32I-NEXT: and a3, a1, a3 ; RV32I-NEXT: .LBB21_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and4: ; RV64I: # %bb.0: ; RV64I-NEXT: bnez a0, .LBB21_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: and a2, a1, a2 ; RV64I-NEXT: .LBB21_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: and4: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: and a1, a1, a2 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: and4: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: and a1, a1, a2 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: and4: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: and a5, a2, a4 ; RV32ZICOND-NEXT: and a1, a1, a3 ; RV32ZICOND-NEXT: czero.eqz a2, a3, a0 ; RV32ZICOND-NEXT: or a2, a1, a2 ; RV32ZICOND-NEXT: czero.eqz a1, a4, a0 ; RV32ZICOND-NEXT: or a1, a5, a1 ; RV32ZICOND-NEXT: mv a0, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: and4: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: and a1, a1, a2 ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a1, a0 ; RV64ZICOND-NEXT: ret %and = and i64 %rs1, %rs2 %sel = select i1 %rc, i64 %rs2, i64 %and ret i64 %sel } define i64 @basic(i1 zeroext %rc, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: basic: ; RV32I: # %bb.0: ; RV32I-NEXT: bnez a0, .LBB22_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a2, a4 ; RV32I-NEXT: .LBB22_2: ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: basic: ; RV64I: # %bb.0: ; 
RV64I-NEXT: bnez a0, .LBB22_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB22_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: basic: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: basic: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: basic: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: czero.nez a3, a3, a0 ; RV32ZICOND-NEXT: czero.eqz a1, a1, a0 ; RV32ZICOND-NEXT: or a3, a1, a3 ; RV32ZICOND-NEXT: czero.nez a1, a4, a0 ; RV32ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: mv a0, a3 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: basic: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a2, a2, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a0, a2 ; RV64ZICOND-NEXT: ret %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @seteq(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: seteq: ; RV32I: # %bb.0: ; RV32I-NEXT: xor a1, a1, a3 ; RV32I-NEXT: xor a0, a0, a2 ; RV32I-NEXT: or a2, a0, a1 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: beqz a2, .LBB23_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a6 ; RV32I-NEXT: mv a1, a7 ; RV32I-NEXT: .LBB23_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: seteq: ; RV64I: # %bb.0: ; RV64I-NEXT: beq a0, a1, .LBB23_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB23_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: seteq: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; 
RV64XTHEADCONDMOV-LABEL: seteq: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: seteq: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor a1, a1, a3 ; RV32ZICOND-NEXT: xor a0, a0, a2 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a6, a1 ; RV32ZICOND-NEXT: czero.nez a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a7, a1 ; RV32ZICOND-NEXT: czero.nez a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: seteq: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xor a0, a0, a1 ; RV64ZICOND-NEXT: czero.eqz a1, a3, a0 ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setne(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setne: ; RV32I: # %bb.0: ; RV32I-NEXT: xor a1, a1, a3 ; RV32I-NEXT: xor a0, a0, a2 ; RV32I-NEXT: or a2, a0, a1 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: bnez a2, .LBB24_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a6 ; RV32I-NEXT: mv a1, a7 ; RV32I-NEXT: .LBB24_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: setne: ; RV64I: # %bb.0: ; RV64I-NEXT: bne a0, a1, .LBB24_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB24_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setne: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setne: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; 
RV32ZICOND-LABEL: setne: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor a1, a1, a3 ; RV32ZICOND-NEXT: xor a0, a0, a2 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a6, a1 ; RV32ZICOND-NEXT: czero.eqz a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a2, a7, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setne: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xor a0, a0, a1 ; RV64ZICOND-NEXT: czero.nez a1, a3, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setgt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setgt: ; RV32I: # %bb.0: ; RV32I-NEXT: beq a1, a3, .LBB25_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: slt a0, a3, a1 ; RV32I-NEXT: beqz a0, .LBB25_3 ; RV32I-NEXT: j .LBB25_4 ; RV32I-NEXT: .LBB25_2: ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: bnez a0, .LBB25_4 ; RV32I-NEXT: .LBB25_3: ; RV32I-NEXT: mv a4, a6 ; RV32I-NEXT: mv a5, a7 ; RV32I-NEXT: .LBB25_4: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: setgt: ; RV64I: # %bb.0: ; RV64I-NEXT: blt a1, a0, .LBB25_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB25_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setgt: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: slt a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setgt: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: slt a0, a1, a0 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setgt: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor t0, a1, a3 ; RV32ZICOND-NEXT: slt a1, a3, 
a1 ; RV32ZICOND-NEXT: czero.eqz a1, a1, t0 ; RV32ZICOND-NEXT: sltu a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a0, a0, t0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a6, a1 ; RV32ZICOND-NEXT: czero.eqz a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a2, a7, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setgt: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: slt a0, a1, a0 ; RV64ZICOND-NEXT: czero.nez a1, a3, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp sgt i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setge: ; RV32I: # %bb.0: ; RV32I-NEXT: beq a1, a3, .LBB26_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: slt a0, a1, a3 ; RV32I-NEXT: bnez a0, .LBB26_3 ; RV32I-NEXT: j .LBB26_4 ; RV32I-NEXT: .LBB26_2: ; RV32I-NEXT: sltu a0, a0, a2 ; RV32I-NEXT: beqz a0, .LBB26_4 ; RV32I-NEXT: .LBB26_3: ; RV32I-NEXT: mv a4, a6 ; RV32I-NEXT: mv a5, a7 ; RV32I-NEXT: .LBB26_4: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: setge: ; RV64I: # %bb.0: ; RV64I-NEXT: bge a0, a1, .LBB26_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB26_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setge: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: slt a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setge: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: slt a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setge: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor t0, a1, a3 ; RV32ZICOND-NEXT: slt a1, 
a1, a3 ; RV32ZICOND-NEXT: czero.eqz a1, a1, t0 ; RV32ZICOND-NEXT: sltu a0, a0, a2 ; RV32ZICOND-NEXT: czero.nez a0, a0, t0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a6, a1 ; RV32ZICOND-NEXT: czero.nez a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a7, a1 ; RV32ZICOND-NEXT: czero.nez a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setge: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: slt a0, a0, a1 ; RV64ZICOND-NEXT: czero.eqz a1, a3, a0 ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp sge i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setlt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setlt: ; RV32I: # %bb.0: ; RV32I-NEXT: beq a1, a3, .LBB27_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: slt a0, a1, a3 ; RV32I-NEXT: beqz a0, .LBB27_3 ; RV32I-NEXT: j .LBB27_4 ; RV32I-NEXT: .LBB27_2: ; RV32I-NEXT: sltu a0, a0, a2 ; RV32I-NEXT: bnez a0, .LBB27_4 ; RV32I-NEXT: .LBB27_3: ; RV32I-NEXT: mv a4, a6 ; RV32I-NEXT: mv a5, a7 ; RV32I-NEXT: .LBB27_4: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: setlt: ; RV64I: # %bb.0: ; RV64I-NEXT: blt a0, a1, .LBB27_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB27_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setlt: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: slt a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setlt: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: slt a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setlt: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor t0, a1, a3 ; RV32ZICOND-NEXT: slt 
a1, a1, a3 ; RV32ZICOND-NEXT: czero.eqz a1, a1, t0 ; RV32ZICOND-NEXT: sltu a0, a0, a2 ; RV32ZICOND-NEXT: czero.nez a0, a0, t0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a6, a1 ; RV32ZICOND-NEXT: czero.eqz a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a2, a7, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setlt: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: slt a0, a0, a1 ; RV64ZICOND-NEXT: czero.nez a1, a3, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp slt i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setle(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setle: ; RV32I: # %bb.0: ; RV32I-NEXT: beq a1, a3, .LBB28_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: slt a0, a3, a1 ; RV32I-NEXT: bnez a0, .LBB28_3 ; RV32I-NEXT: j .LBB28_4 ; RV32I-NEXT: .LBB28_2: ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: beqz a0, .LBB28_4 ; RV32I-NEXT: .LBB28_3: ; RV32I-NEXT: mv a4, a6 ; RV32I-NEXT: mv a5, a7 ; RV32I-NEXT: .LBB28_4: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: setle: ; RV64I: # %bb.0: ; RV64I-NEXT: bge a1, a0, .LBB28_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB28_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setle: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: slt a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setle: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: slt a0, a1, a0 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setle: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor t0, a1, a3 ; RV32ZICOND-NEXT: 
slt a1, a3, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a1, t0 ; RV32ZICOND-NEXT: sltu a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a0, a0, t0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a6, a1 ; RV32ZICOND-NEXT: czero.nez a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a7, a1 ; RV32ZICOND-NEXT: czero.nez a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setle: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: slt a0, a1, a0 ; RV64ZICOND-NEXT: czero.eqz a1, a3, a0 ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp sle i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setugt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setugt: ; RV32I: # %bb.0: ; RV32I-NEXT: beq a1, a3, .LBB29_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu a0, a3, a1 ; RV32I-NEXT: beqz a0, .LBB29_3 ; RV32I-NEXT: j .LBB29_4 ; RV32I-NEXT: .LBB29_2: ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: bnez a0, .LBB29_4 ; RV32I-NEXT: .LBB29_3: ; RV32I-NEXT: mv a4, a6 ; RV32I-NEXT: mv a5, a7 ; RV32I-NEXT: .LBB29_4: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: setugt: ; RV64I: # %bb.0: ; RV64I-NEXT: bltu a1, a0, .LBB29_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB29_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setugt: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: sltu a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setugt: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: sltu a0, a1, a0 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setugt: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor t0, a1, a3 ; 
RV32ZICOND-NEXT: sltu a1, a3, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a1, t0 ; RV32ZICOND-NEXT: sltu a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a0, a0, t0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a6, a1 ; RV32ZICOND-NEXT: czero.eqz a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a2, a7, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setugt: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: sltu a0, a1, a0 ; RV64ZICOND-NEXT: czero.nez a1, a3, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp ugt i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setuge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setuge: ; RV32I: # %bb.0: ; RV32I-NEXT: beq a1, a3, .LBB30_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu a0, a1, a3 ; RV32I-NEXT: bnez a0, .LBB30_3 ; RV32I-NEXT: j .LBB30_4 ; RV32I-NEXT: .LBB30_2: ; RV32I-NEXT: sltu a0, a0, a2 ; RV32I-NEXT: beqz a0, .LBB30_4 ; RV32I-NEXT: .LBB30_3: ; RV32I-NEXT: mv a4, a6 ; RV32I-NEXT: mv a5, a7 ; RV32I-NEXT: .LBB30_4: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: setuge: ; RV64I: # %bb.0: ; RV64I-NEXT: bgeu a0, a1, .LBB30_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB30_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setuge: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: sltu a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setuge: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: sltu a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setuge: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: 
xor t0, a1, a3 ; RV32ZICOND-NEXT: sltu a1, a1, a3 ; RV32ZICOND-NEXT: czero.eqz a1, a1, t0 ; RV32ZICOND-NEXT: sltu a0, a0, a2 ; RV32ZICOND-NEXT: czero.nez a0, a0, t0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a6, a1 ; RV32ZICOND-NEXT: czero.nez a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a7, a1 ; RV32ZICOND-NEXT: czero.nez a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setuge: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: sltu a0, a0, a1 ; RV64ZICOND-NEXT: czero.eqz a1, a3, a0 ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp uge i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setult(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setult: ; RV32I: # %bb.0: ; RV32I-NEXT: beq a1, a3, .LBB31_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu a0, a1, a3 ; RV32I-NEXT: beqz a0, .LBB31_3 ; RV32I-NEXT: j .LBB31_4 ; RV32I-NEXT: .LBB31_2: ; RV32I-NEXT: sltu a0, a0, a2 ; RV32I-NEXT: bnez a0, .LBB31_4 ; RV32I-NEXT: .LBB31_3: ; RV32I-NEXT: mv a4, a6 ; RV32I-NEXT: mv a5, a7 ; RV32I-NEXT: .LBB31_4: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: setult: ; RV64I: # %bb.0: ; RV64I-NEXT: bltu a0, a1, .LBB31_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB31_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setult: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: sltu a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setult: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: sltu a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setult: ; RV32ZICOND: # %bb.0: ; 
RV32ZICOND-NEXT: xor t0, a1, a3 ; RV32ZICOND-NEXT: sltu a1, a1, a3 ; RV32ZICOND-NEXT: czero.eqz a1, a1, t0 ; RV32ZICOND-NEXT: sltu a0, a0, a2 ; RV32ZICOND-NEXT: czero.nez a0, a0, t0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a6, a1 ; RV32ZICOND-NEXT: czero.eqz a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a2, a7, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setult: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: sltu a0, a0, a1 ; RV64ZICOND-NEXT: czero.nez a1, a3, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp ult i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setule(i64 %a, i64 %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setule: ; RV32I: # %bb.0: ; RV32I-NEXT: beq a1, a3, .LBB32_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu a0, a3, a1 ; RV32I-NEXT: bnez a0, .LBB32_3 ; RV32I-NEXT: j .LBB32_4 ; RV32I-NEXT: .LBB32_2: ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: beqz a0, .LBB32_4 ; RV32I-NEXT: .LBB32_3: ; RV32I-NEXT: mv a4, a6 ; RV32I-NEXT: mv a5, a7 ; RV32I-NEXT: .LBB32_4: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: setule: ; RV64I: # %bb.0: ; RV64I-NEXT: bgeu a1, a0, .LBB32_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: .LBB32_2: ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setule: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: sltu a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a3, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setule: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: sltu a0, a1, a0 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, a3, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setule: ; 
RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor t0, a1, a3 ; RV32ZICOND-NEXT: sltu a1, a3, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a1, t0 ; RV32ZICOND-NEXT: sltu a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a0, a0, t0 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a6, a1 ; RV32ZICOND-NEXT: czero.nez a2, a4, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a7, a1 ; RV32ZICOND-NEXT: czero.nez a1, a5, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setule: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: sltu a0, a1, a0 ; RV64ZICOND-NEXT: czero.eqz a1, a3, a0 ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = icmp ule i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @seteq_zero(i64 %a, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: seteq_zero: ; RV32I: # %bb.0: ; RV32I-NEXT: or a6, a0, a1 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: beqz a6, .LBB33_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: .LBB33_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: seteq_zero: ; RV64I: # %bb.0: ; RV64I-NEXT: beqz a0, .LBB33_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB33_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: seteq_zero: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: seteq_zero: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: seteq_zero: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a4, a1 ; RV32ZICOND-NEXT: czero.nez a2, a2, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a5, a1 ; RV32ZICOND-NEXT: 
czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: seteq_zero: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a2, a2, a0 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a0, a2 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, 0 %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setne_zero(i64 %a, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setne_zero: ; RV32I: # %bb.0: ; RV32I-NEXT: or a6, a0, a1 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: bnez a6, .LBB34_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: .LBB34_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: setne_zero: ; RV64I: # %bb.0: ; RV64I-NEXT: bnez a0, .LBB34_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB34_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setne_zero: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setne_zero: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setne_zero: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a4, a1 ; RV32ZICOND-NEXT: czero.eqz a2, a2, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a2, a5, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setne_zero: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a2, a2, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a0, a2 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, 0 %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @seteq_constant(i64 %a, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: seteq_constant: ; RV32I: 
# %bb.0: ; RV32I-NEXT: xori a0, a0, 123 ; RV32I-NEXT: or a6, a0, a1 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: beqz a6, .LBB35_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: .LBB35_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: seteq_constant: ; RV64I: # %bb.0: ; RV64I-NEXT: li a3, 123 ; RV64I-NEXT: beq a0, a3, .LBB35_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB35_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: seteq_constant: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -123 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: seteq_constant: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -123 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: seteq_constant: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xori a0, a0, 123 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a4, a1 ; RV32ZICOND-NEXT: czero.nez a2, a2, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a5, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: seteq_constant: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: addi a0, a0, -123 ; RV64ZICOND-NEXT: czero.eqz a2, a2, a0 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a0, a2 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, 123 %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setne_constant(i64 %a, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setne_constant: ; RV32I: # %bb.0: ; RV32I-NEXT: xori a0, a0, 456 ; RV32I-NEXT: or a6, a0, a1 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: bnez a6, .LBB36_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a4 
; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: .LBB36_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: setne_constant: ; RV64I: # %bb.0: ; RV64I-NEXT: li a3, 456 ; RV64I-NEXT: bne a0, a3, .LBB36_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB36_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setne_constant: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -456 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setne_constant: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -456 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setne_constant: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xori a0, a0, 456 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a4, a1 ; RV32ZICOND-NEXT: czero.eqz a2, a2, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a2, a5, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setne_constant: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: addi a0, a0, -456 ; RV64ZICOND-NEXT: czero.nez a2, a2, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a0, a2 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, 456 %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @seteq_2048(i64 %a, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: seteq_2048: ; RV32I: # %bb.0: ; RV32I-NEXT: binvi a0, a0, 11 ; RV32I-NEXT: or a6, a0, a1 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: beqz a6, .LBB37_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: .LBB37_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: seteq_2048: ; RV64I: # %bb.0: ; RV64I-NEXT: bseti a3, zero, 11 ; RV64I-NEXT: beq a0, a3, .LBB37_2 ; RV64I-NEXT: # %bb.1: 
; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB37_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: seteq_2048: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -2048 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: seteq_2048: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -2048 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: seteq_2048: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: binvi a0, a0, 11 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a4, a1 ; RV32ZICOND-NEXT: czero.nez a2, a2, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a5, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: seteq_2048: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: addi a0, a0, -2048 ; RV64ZICOND-NEXT: czero.eqz a2, a2, a0 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a0, a2 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, 2048 %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @seteq_neg2048(i64 %a, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: seteq_neg2048: ; RV32I: # %bb.0: ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: xori a0, a0, -2048 ; RV32I-NEXT: or a6, a0, a1 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: beqz a6, .LBB38_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: .LBB38_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: seteq_neg2048: ; RV64I: # %bb.0: ; RV64I-NEXT: li a3, -2048 ; RV64I-NEXT: beq a0, a3, .LBB38_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB38_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: seteq_neg2048: ; RV64XVENTANACONDOPS: # %bb.0: ; 
RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a2, a2, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: seteq_neg2048: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: seteq_neg2048: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: not a1, a1 ; RV32ZICOND-NEXT: xori a0, a0, -2048 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a4, a1 ; RV32ZICOND-NEXT: czero.nez a2, a2, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.eqz a2, a5, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: seteq_neg2048: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xori a0, a0, -2048 ; RV64ZICOND-NEXT: czero.eqz a2, a2, a0 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a0, a2 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, -2048 %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @setne_neg2048(i64 %a, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setne_neg2048: ; RV32I: # %bb.0: ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: xori a0, a0, -2048 ; RV32I-NEXT: or a6, a0, a1 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: bnez a6, .LBB39_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a4 ; RV32I-NEXT: mv a1, a5 ; RV32I-NEXT: .LBB39_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: setne_neg2048: ; RV64I: # %bb.0: ; RV64I-NEXT: li a3, -2048 ; RV64I-NEXT: bne a0, a3, .LBB39_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB39_2: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setne_neg2048: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a2, a2, a0 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, 
a1, a0 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setne_neg2048: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, a2, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setne_neg2048: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: not a1, a1 ; RV32ZICOND-NEXT: xori a0, a0, -2048 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a4, a1 ; RV32ZICOND-NEXT: czero.eqz a2, a2, a1 ; RV32ZICOND-NEXT: or a0, a2, a0 ; RV32ZICOND-NEXT: czero.nez a2, a5, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setne_neg2048: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xori a0, a0, -2048 ; RV64ZICOND-NEXT: czero.nez a2, a2, a0 ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: or a0, a0, a2 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, -2048 %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } define i64 @zero1_seteq(i64 %a, i64 %b, i64 %rs1) { ; RV32I-LABEL: zero1_seteq: ; RV32I: # %bb.0: ; RV32I-NEXT: xor a1, a1, a3 ; RV32I-NEXT: xor a0, a0, a2 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a4 ; RV32I-NEXT: and a1, a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1_seteq: ; RV64I: # %bb.0: ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1_seteq: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero1_seteq: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1_seteq: ; 
RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor a1, a1, a3 ; RV32ZICOND-NEXT: xor a0, a0, a2 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a4, a1 ; RV32ZICOND-NEXT: czero.nez a1, a5, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1_seteq: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xor a0, a0, a1 ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2_seteq(i64 %a, i64 %b, i64 %rs1) { ; RV32I-LABEL: zero2_seteq: ; RV32I: # %bb.0: ; RV32I-NEXT: xor a1, a1, a3 ; RV32I-NEXT: xor a0, a0, a2 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a4 ; RV32I-NEXT: and a1, a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2_seteq: ; RV64I: # %bb.0: ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2_seteq: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero2_seteq: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2_seteq: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor a1, a1, a3 ; RV32ZICOND-NEXT: xor a0, a0, a2 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a4, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a5, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero2_seteq: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xor a0, a0, a1 ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, %b %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @zero1_setne(i64 %a, i64 %b, i64 %rs1) { ; RV32I-LABEL: zero1_setne: ; RV32I: # %bb.0: ; RV32I-NEXT: xor a1, a1, 
a3 ; RV32I-NEXT: xor a0, a0, a2 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a4 ; RV32I-NEXT: and a1, a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1_setne: ; RV64I: # %bb.0: ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1_setne: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero1_setne: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1_setne: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor a1, a1, a3 ; RV32ZICOND-NEXT: xor a0, a0, a2 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a4, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a5, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1_setne: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xor a0, a0, a1 ; RV64ZICOND-NEXT: czero.eqz a0, a2, a0 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, %b %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2_setne(i64 %a, i64 %b, i64 %rs1) { ; RV32I-LABEL: zero2_setne: ; RV32I: # %bb.0: ; RV32I-NEXT: xor a1, a1, a3 ; RV32I-NEXT: xor a0, a0, a2 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a4 ; RV32I-NEXT: and a1, a1, a5 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2_setne: ; RV64I: # %bb.0: ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2_setne: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a2, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; 
RV64XTHEADCONDMOV-LABEL: zero2_setne: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a2, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2_setne: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xor a1, a1, a3 ; RV32ZICOND-NEXT: xor a0, a0, a2 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a4, a1 ; RV32ZICOND-NEXT: czero.nez a1, a5, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero2_setne: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xor a0, a0, a1 ; RV64ZICOND-NEXT: czero.nez a0, a2, a0 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, %b %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @zero1_seteq_zero(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero1_seteq_zero: ; RV32I: # %bb.0: ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1_seteq_zero: ; RV64I: # %bb.0: ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1_seteq_zero: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero1_seteq_zero: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1_seteq_zero: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a2, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1_seteq_zero: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, 0 %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2_seteq_zero(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero2_seteq_zero: ; RV32I: # %bb.0: ; 
RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2_seteq_zero: ; RV64I: # %bb.0: ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2_seteq_zero: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero2_seteq_zero: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2_seteq_zero: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a2, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero2_seteq_zero: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, 0 %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @zero1_setne_zero(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero1_setne_zero: ; RV32I: # %bb.0: ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1_setne_zero: ; RV64I: # %bb.0: ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1_setne_zero: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero1_setne_zero: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1_setne_zero: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a2, a1 ; RV32ZICOND-NEXT: 
czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1_setne_zero: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, 0 %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2_setne_zero(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero2_setne_zero: ; RV32I: # %bb.0: ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2_setne_zero: ; RV64I: # %bb.0: ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2_setne_zero: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero2_setne_zero: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2_setne_zero: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a2, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero2_setne_zero: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, 0 %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @zero1_seteq_constant(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero1_seteq_constant: ; RV32I: # %bb.0: ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: xori a0, a0, -231 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1_seteq_constant: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, 231 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1_seteq_constant: ; 
RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: addi a0, a0, 231 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero1_seteq_constant: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: addi a0, a0, 231 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1_seteq_constant: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: not a1, a1 ; RV32ZICOND-NEXT: xori a0, a0, -231 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a2, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1_seteq_constant: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: addi a0, a0, 231 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, -231 %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2_seteq_constant(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero2_seteq_constant: ; RV32I: # %bb.0: ; RV32I-NEXT: xori a0, a0, 546 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2_seteq_constant: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -546 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2_seteq_constant: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -546 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero2_seteq_constant: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -546 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2_seteq_constant: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xori a0, a0, 546 ; RV32ZICOND-NEXT: or a1, a0, a1 ; 
RV32ZICOND-NEXT: czero.eqz a0, a2, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero2_seteq_constant: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: addi a0, a0, -546 ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, 546 %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @zero1_setne_constant(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero1_setne_constant: ; RV32I: # %bb.0: ; RV32I-NEXT: xori a0, a0, 321 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1_setne_constant: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, -321 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1_setne_constant: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: addi a0, a0, -321 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero1_setne_constant: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: addi a0, a0, -321 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1_setne_constant: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: xori a0, a0, 321 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a2, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1_setne_constant: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: addi a0, a0, -321 ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, 321 %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2_setne_constant(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero2_setne_constant: ; RV32I: # %bb.0: ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: xori a0, a0, -654 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; 
RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2_setne_constant: ; RV64I: # %bb.0: ; RV64I-NEXT: addi a0, a0, 654 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2_setne_constant: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: addi a0, a0, 654 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero2_setne_constant: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: addi a0, a0, 654 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2_setne_constant: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: not a1, a1 ; RV32ZICOND-NEXT: xori a0, a0, -654 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a2, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero2_setne_constant: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: addi a0, a0, 654 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, -654 %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @zero1_seteq_neg2048(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero1_seteq_neg2048: ; RV32I: # %bb.0: ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: xori a0, a0, -2048 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1_seteq_neg2048: ; RV64I: # %bb.0: ; RV64I-NEXT: xori a0, a0, -2048 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1_seteq_neg2048: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; 
RV64XTHEADCONDMOV-LABEL: zero1_seteq_neg2048: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1_seteq_neg2048: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: not a1, a1 ; RV32ZICOND-NEXT: xori a0, a0, -2048 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a2, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1_seteq_neg2048: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xori a0, a0, -2048 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, -2048 %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2_seteq_neg2048(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero2_seteq_neg2048: ; RV32I: # %bb.0: ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: xori a0, a0, -2048 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2_seteq_neg2048: ; RV64I: # %bb.0: ; RV64I-NEXT: xori a0, a0, -2048 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2_seteq_neg2048: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero2_seteq_neg2048: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2_seteq_neg2048: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: not a1, a1 ; RV32ZICOND-NEXT: xori a0, a0, -2048 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a2, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; 
RV64ZICOND-LABEL: zero2_seteq_neg2048: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xori a0, a0, -2048 ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp eq i64 %a, -2048 %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define i64 @zero1_setne_neg2048(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero1_setne_neg2048: ; RV32I: # %bb.0: ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: xori a0, a0, -2048 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero1_setne_neg2048: ; RV64I: # %bb.0: ; RV64I-NEXT: xori a0, a0, -2048 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero1_setne_neg2048: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero1_setne_neg2048: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048 ; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero1_setne_neg2048: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: not a1, a1 ; RV32ZICOND-NEXT: xori a0, a0, -2048 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.eqz a0, a2, a1 ; RV32ZICOND-NEXT: czero.eqz a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero1_setne_neg2048: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xori a0, a0, -2048 ; RV64ZICOND-NEXT: czero.eqz a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, -2048 %sel = select i1 %rc, i64 %rs1, i64 0 ret i64 %sel } define i64 @zero2_setne_neg2048(i64 %a, i64 %rs1) { ; RV32I-LABEL: zero2_setne_neg2048: ; RV32I: # %bb.0: ; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: xori a0, a0, -2048 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: and a0, 
a1, a2 ; RV32I-NEXT: and a1, a1, a3 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zero2_setne_neg2048: ; RV64I: # %bb.0: ; RV64I-NEXT: xori a0, a0, -2048 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: addi a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: zero2_setne_neg2048: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: xori a0, a0, -2048 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a1, a0 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: zero2_setne_neg2048: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: xori a0, a0, -2048 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: mv a0, a1 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: zero2_setne_neg2048: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: not a1, a1 ; RV32ZICOND-NEXT: xori a0, a0, -2048 ; RV32ZICOND-NEXT: or a1, a0, a1 ; RV32ZICOND-NEXT: czero.nez a0, a2, a1 ; RV32ZICOND-NEXT: czero.nez a1, a3, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: zero2_setne_neg2048: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: xori a0, a0, -2048 ; RV64ZICOND-NEXT: czero.nez a0, a1, a0 ; RV64ZICOND-NEXT: ret %rc = icmp ne i64 %a, -2048 %sel = select i1 %rc, i64 0, i64 %rs1 ret i64 %sel } define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nounwind { ; RV32I-LABEL: sextw_removal_maskc: ; RV32I: # %bb.0: # %bb ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: slli a0, a0, 31 ; RV32I-NEXT: srai a0, a0, 31 ; RV32I-NEXT: and s1, a0, a1 ; RV32I-NEXT: .LBB56_1: # %bb2 ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call bar@plt ; RV32I-NEXT: sll s1, s1, s0 ; RV32I-NEXT: bnez a0, .LBB56_1 ; RV32I-NEXT: # %bb.2: # %bb7 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 
4(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sextw_removal_maskc: ; RV64I: # %bb.0: # %bb ; RV64I-NEXT: addi sp, sp, -32 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a2 ; RV64I-NEXT: slli a0, a0, 63 ; RV64I-NEXT: srai a0, a0, 63 ; RV64I-NEXT: and s1, a0, a1 ; RV64I-NEXT: .LBB56_1: # %bb2 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call bar@plt ; RV64I-NEXT: sllw s1, s1, s0 ; RV64I-NEXT: bnez a0, .LBB56_1 ; RV64I-NEXT: # %bb.2: # %bb7 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 32 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: sextw_removal_maskc: ; RV64XVENTANACONDOPS: # %bb.0: # %bb ; RV64XVENTANACONDOPS-NEXT: addi sp, sp, -32 ; RV64XVENTANACONDOPS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64XVENTANACONDOPS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64XVENTANACONDOPS-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64XVENTANACONDOPS-NEXT: mv s0, a2 ; RV64XVENTANACONDOPS-NEXT: andi a0, a0, 1 ; RV64XVENTANACONDOPS-NEXT: vt.maskc s1, a1, a0 ; RV64XVENTANACONDOPS-NEXT: .LBB56_1: # %bb2 ; RV64XVENTANACONDOPS-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64XVENTANACONDOPS-NEXT: mv a0, s1 ; RV64XVENTANACONDOPS-NEXT: call bar@plt ; RV64XVENTANACONDOPS-NEXT: sllw s1, s1, s0 ; RV64XVENTANACONDOPS-NEXT: bnez a0, .LBB56_1 ; RV64XVENTANACONDOPS-NEXT: # %bb.2: # %bb7 ; RV64XVENTANACONDOPS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64XVENTANACONDOPS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64XVENTANACONDOPS-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64XVENTANACONDOPS-NEXT: addi sp, sp, 32 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: sextw_removal_maskc: ; RV64XTHEADCONDMOV: # %bb.0: # %bb ; 
RV64XTHEADCONDMOV-NEXT: addi sp, sp, -32 ; RV64XTHEADCONDMOV-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64XTHEADCONDMOV-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64XTHEADCONDMOV-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64XTHEADCONDMOV-NEXT: mv s0, a2 ; RV64XTHEADCONDMOV-NEXT: mv s1, a1 ; RV64XTHEADCONDMOV-NEXT: andi a0, a0, 1 ; RV64XTHEADCONDMOV-NEXT: th.mveqz s1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: .LBB56_1: # %bb2 ; RV64XTHEADCONDMOV-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64XTHEADCONDMOV-NEXT: sext.w a0, s1 ; RV64XTHEADCONDMOV-NEXT: call bar@plt ; RV64XTHEADCONDMOV-NEXT: sllw s1, s1, s0 ; RV64XTHEADCONDMOV-NEXT: bnez a0, .LBB56_1 ; RV64XTHEADCONDMOV-NEXT: # %bb.2: # %bb7 ; RV64XTHEADCONDMOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64XTHEADCONDMOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64XTHEADCONDMOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64XTHEADCONDMOV-NEXT: addi sp, sp, 32 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: sextw_removal_maskc: ; RV32ZICOND: # %bb.0: # %bb ; RV32ZICOND-NEXT: addi sp, sp, -16 ; RV32ZICOND-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ZICOND-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32ZICOND-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32ZICOND-NEXT: mv s0, a2 ; RV32ZICOND-NEXT: andi a0, a0, 1 ; RV32ZICOND-NEXT: czero.eqz s1, a1, a0 ; RV32ZICOND-NEXT: .LBB56_1: # %bb2 ; RV32ZICOND-NEXT: # =>This Inner Loop Header: Depth=1 ; RV32ZICOND-NEXT: mv a0, s1 ; RV32ZICOND-NEXT: call bar@plt ; RV32ZICOND-NEXT: sll s1, s1, s0 ; RV32ZICOND-NEXT: bnez a0, .LBB56_1 ; RV32ZICOND-NEXT: # %bb.2: # %bb7 ; RV32ZICOND-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32ZICOND-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32ZICOND-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32ZICOND-NEXT: addi sp, sp, 16 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: sextw_removal_maskc: ; RV64ZICOND: # %bb.0: # %bb ; RV64ZICOND-NEXT: addi sp, sp, -32 ; RV64ZICOND-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64ZICOND-NEXT: sd 
s0, 16(sp) # 8-byte Folded Spill ; RV64ZICOND-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64ZICOND-NEXT: mv s0, a2 ; RV64ZICOND-NEXT: andi a0, a0, 1 ; RV64ZICOND-NEXT: czero.eqz s1, a1, a0 ; RV64ZICOND-NEXT: .LBB56_1: # %bb2 ; RV64ZICOND-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64ZICOND-NEXT: mv a0, s1 ; RV64ZICOND-NEXT: call bar@plt ; RV64ZICOND-NEXT: sllw s1, s1, s0 ; RV64ZICOND-NEXT: bnez a0, .LBB56_1 ; RV64ZICOND-NEXT: # %bb.2: # %bb7 ; RV64ZICOND-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64ZICOND-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64ZICOND-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64ZICOND-NEXT: addi sp, sp, 32 ; RV64ZICOND-NEXT: ret bb: %i = select i1 %c, i32 %arg, i32 0 br label %bb2 bb2: ; preds = %bb2, %bb %i3 = phi i32 [ %i, %bb ], [ %i5, %bb2 ] %i4 = tail call signext i32 @bar(i32 signext %i3) %i5 = shl i32 %i3, %arg1 %i6 = icmp eq i32 %i4, 0 br i1 %i6, label %bb7, label %bb2 bb7: ; preds = %bb2 ret void } declare signext i32 @bar(i32 signext) define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) nounwind { ; RV32I-LABEL: sextw_removal_maskcn: ; RV32I: # %bb.0: # %bb ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: addi a0, a0, -1 ; RV32I-NEXT: and s1, a0, a1 ; RV32I-NEXT: .LBB57_1: # %bb2 ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call bar@plt ; RV32I-NEXT: sll s1, s1, s0 ; RV32I-NEXT: bnez a0, .LBB57_1 ; RV32I-NEXT: # %bb.2: # %bb7 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sextw_removal_maskcn: ; RV64I: # %bb.0: # %bb ; RV64I-NEXT: addi sp, sp, -32 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded 
Spill ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a2 ; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and s1, a0, a1 ; RV64I-NEXT: .LBB57_1: # %bb2 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call bar@plt ; RV64I-NEXT: sllw s1, s1, s0 ; RV64I-NEXT: bnez a0, .LBB57_1 ; RV64I-NEXT: # %bb.2: # %bb7 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 32 ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: sextw_removal_maskcn: ; RV64XVENTANACONDOPS: # %bb.0: # %bb ; RV64XVENTANACONDOPS-NEXT: addi sp, sp, -32 ; RV64XVENTANACONDOPS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64XVENTANACONDOPS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64XVENTANACONDOPS-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64XVENTANACONDOPS-NEXT: mv s0, a2 ; RV64XVENTANACONDOPS-NEXT: andi a0, a0, 1 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn s1, a1, a0 ; RV64XVENTANACONDOPS-NEXT: .LBB57_1: # %bb2 ; RV64XVENTANACONDOPS-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64XVENTANACONDOPS-NEXT: mv a0, s1 ; RV64XVENTANACONDOPS-NEXT: call bar@plt ; RV64XVENTANACONDOPS-NEXT: sllw s1, s1, s0 ; RV64XVENTANACONDOPS-NEXT: bnez a0, .LBB57_1 ; RV64XVENTANACONDOPS-NEXT: # %bb.2: # %bb7 ; RV64XVENTANACONDOPS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64XVENTANACONDOPS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64XVENTANACONDOPS-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64XVENTANACONDOPS-NEXT: addi sp, sp, 32 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: sextw_removal_maskcn: ; RV64XTHEADCONDMOV: # %bb.0: # %bb ; RV64XTHEADCONDMOV-NEXT: addi sp, sp, -32 ; RV64XTHEADCONDMOV-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64XTHEADCONDMOV-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64XTHEADCONDMOV-NEXT: sd s1, 8(sp) # 8-byte Folded 
Spill ; RV64XTHEADCONDMOV-NEXT: mv s0, a2 ; RV64XTHEADCONDMOV-NEXT: mv s1, a1 ; RV64XTHEADCONDMOV-NEXT: andi a0, a0, 1 ; RV64XTHEADCONDMOV-NEXT: th.mvnez s1, zero, a0 ; RV64XTHEADCONDMOV-NEXT: .LBB57_1: # %bb2 ; RV64XTHEADCONDMOV-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64XTHEADCONDMOV-NEXT: sext.w a0, s1 ; RV64XTHEADCONDMOV-NEXT: call bar@plt ; RV64XTHEADCONDMOV-NEXT: sllw s1, s1, s0 ; RV64XTHEADCONDMOV-NEXT: bnez a0, .LBB57_1 ; RV64XTHEADCONDMOV-NEXT: # %bb.2: # %bb7 ; RV64XTHEADCONDMOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64XTHEADCONDMOV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64XTHEADCONDMOV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64XTHEADCONDMOV-NEXT: addi sp, sp, 32 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: sextw_removal_maskcn: ; RV32ZICOND: # %bb.0: # %bb ; RV32ZICOND-NEXT: addi sp, sp, -16 ; RV32ZICOND-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ZICOND-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32ZICOND-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32ZICOND-NEXT: mv s0, a2 ; RV32ZICOND-NEXT: andi a0, a0, 1 ; RV32ZICOND-NEXT: czero.nez s1, a1, a0 ; RV32ZICOND-NEXT: .LBB57_1: # %bb2 ; RV32ZICOND-NEXT: # =>This Inner Loop Header: Depth=1 ; RV32ZICOND-NEXT: mv a0, s1 ; RV32ZICOND-NEXT: call bar@plt ; RV32ZICOND-NEXT: sll s1, s1, s0 ; RV32ZICOND-NEXT: bnez a0, .LBB57_1 ; RV32ZICOND-NEXT: # %bb.2: # %bb7 ; RV32ZICOND-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32ZICOND-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32ZICOND-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32ZICOND-NEXT: addi sp, sp, 16 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: sextw_removal_maskcn: ; RV64ZICOND: # %bb.0: # %bb ; RV64ZICOND-NEXT: addi sp, sp, -32 ; RV64ZICOND-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64ZICOND-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64ZICOND-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64ZICOND-NEXT: mv s0, a2 ; RV64ZICOND-NEXT: andi a0, a0, 1 ; RV64ZICOND-NEXT: czero.nez s1, a1, a0 ; RV64ZICOND-NEXT: 
.LBB57_1: # %bb2 ; RV64ZICOND-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64ZICOND-NEXT: mv a0, s1 ; RV64ZICOND-NEXT: call bar@plt ; RV64ZICOND-NEXT: sllw s1, s1, s0 ; RV64ZICOND-NEXT: bnez a0, .LBB57_1 ; RV64ZICOND-NEXT: # %bb.2: # %bb7 ; RV64ZICOND-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64ZICOND-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64ZICOND-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64ZICOND-NEXT: addi sp, sp, 32 ; RV64ZICOND-NEXT: ret bb: %i = select i1 %c, i32 0, i32 %arg br label %bb2 bb2: ; preds = %bb2, %bb %i3 = phi i32 [ %i, %bb ], [ %i5, %bb2 ] %i4 = tail call signext i32 @bar(i32 signext %i3) %i5 = shl i32 %i3, %arg1 %i6 = icmp eq i32 %i4, 0 br i1 %i6, label %bb7, label %bb2 bb7: ; preds = %bb2 ret void } define i32 @setune_32(float %a, float %b, i32 %rs1, i32 %rs2) { ; RV32I-LABEL: setune_32: ; RV32I: # %bb.0: ; RV32I-NEXT: feq.s a2, fa0, fa1 ; RV32I-NEXT: beqz a2, .LBB58_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: .LBB58_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: setune_32: ; RV64I: # %bb.0: ; RV64I-NEXT: feq.s a2, fa0, fa1 ; RV64I-NEXT: beqz a2, .LBB58_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: .LBB58_2: ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setune_32: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: feq.s a2, fa0, fa1 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setune_32: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: feq.s a2, fa0, fa1 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setune_32: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: feq.s a2, fa0, fa1 ; RV32ZICOND-NEXT: czero.eqz a1, a1, a2 ; RV32ZICOND-NEXT: czero.nez a0, a0, a2 ; RV32ZICOND-NEXT: or a0, a0, a1 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setune_32: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: 
feq.s a2, fa0, fa1 ; RV64ZICOND-NEXT: czero.eqz a1, a1, a2 ; RV64ZICOND-NEXT: czero.nez a0, a0, a2 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = fcmp une float %a, %b %sel = select i1 %rc, i32 %rs1, i32 %rs2 ret i32 %sel } define i64 @setune_64(float %a, float %b, i64 %rs1, i64 %rs2) { ; RV32I-LABEL: setune_64: ; RV32I: # %bb.0: ; RV32I-NEXT: feq.s a4, fa0, fa1 ; RV32I-NEXT: beqz a4, .LBB59_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: .LBB59_2: ; RV32I-NEXT: ret ; ; RV64I-LABEL: setune_64: ; RV64I: # %bb.0: ; RV64I-NEXT: feq.s a2, fa0, fa1 ; RV64I-NEXT: beqz a2, .LBB59_2 ; RV64I-NEXT: # %bb.1: ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: .LBB59_2: ; RV64I-NEXT: ret ; ; RV64XVENTANACONDOPS-LABEL: setune_64: ; RV64XVENTANACONDOPS: # %bb.0: ; RV64XVENTANACONDOPS-NEXT: feq.s a2, fa0, fa1 ; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2 ; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2 ; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1 ; RV64XVENTANACONDOPS-NEXT: ret ; ; RV64XTHEADCONDMOV-LABEL: setune_64: ; RV64XTHEADCONDMOV: # %bb.0: ; RV64XTHEADCONDMOV-NEXT: feq.s a2, fa0, fa1 ; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2 ; RV64XTHEADCONDMOV-NEXT: ret ; ; RV32ZICOND-LABEL: setune_64: ; RV32ZICOND: # %bb.0: ; RV32ZICOND-NEXT: feq.s a4, fa0, fa1 ; RV32ZICOND-NEXT: czero.eqz a2, a2, a4 ; RV32ZICOND-NEXT: czero.nez a0, a0, a4 ; RV32ZICOND-NEXT: or a0, a0, a2 ; RV32ZICOND-NEXT: czero.eqz a2, a3, a4 ; RV32ZICOND-NEXT: czero.nez a1, a1, a4 ; RV32ZICOND-NEXT: or a1, a1, a2 ; RV32ZICOND-NEXT: ret ; ; RV64ZICOND-LABEL: setune_64: ; RV64ZICOND: # %bb.0: ; RV64ZICOND-NEXT: feq.s a2, fa0, fa1 ; RV64ZICOND-NEXT: czero.eqz a1, a1, a2 ; RV64ZICOND-NEXT: czero.nez a0, a0, a2 ; RV64ZICOND-NEXT: or a0, a0, a1 ; RV64ZICOND-NEXT: ret %rc = fcmp une float %a, %b %sel = select i1 %rc, i64 %rs1, i64 %rs2 ret i64 %sel } ; Test that we can ComputeNumSignBits across basic blocks when the live out is ; RISCVISD::SELECT_CC. 
; numsignbits: the i16 select result feeds both a call argument and the
; return value; since all values involved are already sign-extended i16,
; there should be no slli+srai or sext.h in the output.
define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2, i16 signext %3) nounwind {
; RV32I-LABEL: numsignbits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a3
; RV32I-NEXT:    beqz a0, .LBB60_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv s0, a2
; RV32I-NEXT:  .LBB60_2:
; RV32I-NEXT:    beqz a1, .LBB60_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call bat@plt
; RV32I-NEXT:  .LBB60_4:
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: numsignbits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a3
; RV64I-NEXT:    beqz a0, .LBB60_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:  .LBB60_2:
; RV64I-NEXT:    beqz a1, .LBB60_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call bat@plt
; RV64I-NEXT:  .LBB60_4:
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64XVENTANACONDOPS-LABEL: numsignbits:
; RV64XVENTANACONDOPS:       # %bb.0:
; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, -16
; RV64XVENTANACONDOPS-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64XVENTANACONDOPS-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64XVENTANACONDOPS-NEXT:    vt.maskc a2, a2, a0
; RV64XVENTANACONDOPS-NEXT:    vt.maskcn s0, a3, a0
; RV64XVENTANACONDOPS-NEXT:    or s0, s0, a2
; RV64XVENTANACONDOPS-NEXT:    beqz a1, .LBB60_2
; RV64XVENTANACONDOPS-NEXT:  # %bb.1:
; RV64XVENTANACONDOPS-NEXT:    mv a0, s0
; RV64XVENTANACONDOPS-NEXT:    call bat@plt
; RV64XVENTANACONDOPS-NEXT:  .LBB60_2:
; RV64XVENTANACONDOPS-NEXT:    mv a0, s0
; RV64XVENTANACONDOPS-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64XVENTANACONDOPS-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, 16
; RV64XVENTANACONDOPS-NEXT:    ret
;
; RV64XTHEADCONDMOV-LABEL: numsignbits:
; RV64XTHEADCONDMOV:       # %bb.0:
; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, -16
; RV64XTHEADCONDMOV-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64XTHEADCONDMOV-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64XTHEADCONDMOV-NEXT:    mv s0, a2
; RV64XTHEADCONDMOV-NEXT:    th.mveqz s0, a3, a0
; RV64XTHEADCONDMOV-NEXT:    beqz a1, .LBB60_2
; RV64XTHEADCONDMOV-NEXT:  # %bb.1:
; RV64XTHEADCONDMOV-NEXT:    mv a0, s0
; RV64XTHEADCONDMOV-NEXT:    call bat@plt
; RV64XTHEADCONDMOV-NEXT:  .LBB60_2:
; RV64XTHEADCONDMOV-NEXT:    mv a0, s0
; RV64XTHEADCONDMOV-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64XTHEADCONDMOV-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, 16
; RV64XTHEADCONDMOV-NEXT:    ret
;
; RV32ZICOND-LABEL: numsignbits:
; RV32ZICOND:       # %bb.0:
; RV32ZICOND-NEXT:    addi sp, sp, -16
; RV32ZICOND-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZICOND-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZICOND-NEXT:    czero.eqz a2, a2, a0
; RV32ZICOND-NEXT:    czero.nez s0, a3, a0
; RV32ZICOND-NEXT:    or s0, s0, a2
; RV32ZICOND-NEXT:    beqz a1, .LBB60_2
; RV32ZICOND-NEXT:  # %bb.1:
; RV32ZICOND-NEXT:    mv a0, s0
; RV32ZICOND-NEXT:    call bat@plt
; RV32ZICOND-NEXT:  .LBB60_2:
; RV32ZICOND-NEXT:    mv a0, s0
; RV32ZICOND-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZICOND-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZICOND-NEXT:    addi sp, sp, 16
; RV32ZICOND-NEXT:    ret
;
; RV64ZICOND-LABEL: numsignbits:
; RV64ZICOND:       # %bb.0:
; RV64ZICOND-NEXT:    addi sp, sp, -16
; RV64ZICOND-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZICOND-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZICOND-NEXT:    czero.eqz a2, a2, a0
; RV64ZICOND-NEXT:    czero.nez s0, a3, a0
; RV64ZICOND-NEXT:    or s0, s0, a2
; RV64ZICOND-NEXT:    beqz a1, .LBB60_2
; RV64ZICOND-NEXT:  # %bb.1:
; RV64ZICOND-NEXT:    mv a0, s0
; RV64ZICOND-NEXT:    call bat@plt
; RV64ZICOND-NEXT:  .LBB60_2:
; RV64ZICOND-NEXT:    mv a0, s0
; RV64ZICOND-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZICOND-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZICOND-NEXT:    addi sp, sp, 16
; RV64ZICOND-NEXT:    ret
  %5 = icmp eq i16 %0, 0
  %6 = select i1 %5, i16 %3, i16 %2
  %7 = icmp eq i16 %1, 0
  br i1 %7, label %9, label %8

8:                                                ; preds = %4
  tail call void @bat(i16 signext %6)
  br label %9

9:                                                ; preds = %8, %4
  ret i16 %6
}

declare void @bat(i16 signext)

; single_bit: select guarded by a single-bit test, (x & 1024) != 0 ? x : 0.
; The base ISA lowers it to a slli+srai sign-bit smear used as an AND mask;
; XVentanaCondOps/Zicond materialize the bit with andi and conditionally
; zero the result instead. XTheadCondMov still uses the slli+srai form here.
define i64 @single_bit(i64 %x) {
; RV32I-LABEL: single_bit:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    slli a2, a0, 21
; RV32I-NEXT:    srai a2, a2, 31
; RV32I-NEXT:    and a0, a2, a0
; RV32I-NEXT:    and a1, a2, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: single_bit:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a1, a0, 53
; RV64I-NEXT:    srai a1, a1, 63
; RV64I-NEXT:    and a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64XVENTANACONDOPS-LABEL: single_bit:
; RV64XVENTANACONDOPS:       # %bb.0: # %entry
; RV64XVENTANACONDOPS-NEXT:    andi a1, a0, 1024
; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a0, a1
; RV64XVENTANACONDOPS-NEXT:    ret
;
; RV64XTHEADCONDMOV-LABEL: single_bit:
; RV64XTHEADCONDMOV:       # %bb.0: # %entry
; RV64XTHEADCONDMOV-NEXT:    slli a1, a0, 53
; RV64XTHEADCONDMOV-NEXT:    srai a1, a1, 63
; RV64XTHEADCONDMOV-NEXT:    and a0, a1, a0
; RV64XTHEADCONDMOV-NEXT:    ret
;
; RV32ZICOND-LABEL: single_bit:
; RV32ZICOND:       # %bb.0: # %entry
; RV32ZICOND-NEXT:    andi a2, a0, 1024
; RV32ZICOND-NEXT:    czero.eqz a0, a0, a2
; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
; RV32ZICOND-NEXT:    ret
;
; RV64ZICOND-LABEL: single_bit:
; RV64ZICOND:       # %bb.0: # %entry
; RV64ZICOND-NEXT:    andi a1, a0, 1024
; RV64ZICOND-NEXT:    czero.eqz a0, a0, a1
; RV64ZICOND-NEXT:    ret
entry:
  %and = and i64 %x, 1024
  %tobool.not = icmp eq i64 %and, 0
  %cond = select i1 %tobool.not, i64 0, i64 %x
  ret i64 %cond
}

; Test to fold select with single bit check to (and (sra (shl x))).
; single_bit2: (x & 2048) != 0 ? x : 0. Same single-bit-guarded select, but
; for bit 11 — 2048 does not fit andi's 12-bit signed immediate (max 2047),
; so the XVentanaCondOps/Zicond configs extract the bit with Zbs bexti
; instead; base ISA and XTheadCondMov keep the slli+srai mask form.
define i64 @single_bit2(i64 %x) {
; RV32I-LABEL: single_bit2:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    slli a2, a0, 20
; RV32I-NEXT:    srai a2, a2, 31
; RV32I-NEXT:    and a0, a2, a0
; RV32I-NEXT:    and a1, a2, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: single_bit2:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a1, a0, 52
; RV64I-NEXT:    srai a1, a1, 63
; RV64I-NEXT:    and a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64XVENTANACONDOPS-LABEL: single_bit2:
; RV64XVENTANACONDOPS:       # %bb.0: # %entry
; RV64XVENTANACONDOPS-NEXT:    bexti a1, a0, 11
; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a0, a1
; RV64XVENTANACONDOPS-NEXT:    ret
;
; RV64XTHEADCONDMOV-LABEL: single_bit2:
; RV64XTHEADCONDMOV:       # %bb.0: # %entry
; RV64XTHEADCONDMOV-NEXT:    slli a1, a0, 52
; RV64XTHEADCONDMOV-NEXT:    srai a1, a1, 63
; RV64XTHEADCONDMOV-NEXT:    and a0, a1, a0
; RV64XTHEADCONDMOV-NEXT:    ret
;
; RV32ZICOND-LABEL: single_bit2:
; RV32ZICOND:       # %bb.0: # %entry
; RV32ZICOND-NEXT:    bexti a2, a0, 11
; RV32ZICOND-NEXT:    czero.eqz a0, a0, a2
; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
; RV32ZICOND-NEXT:    ret
;
; RV64ZICOND-LABEL: single_bit2:
; RV64ZICOND:       # %bb.0: # %entry
; RV64ZICOND-NEXT:    bexti a1, a0, 11
; RV64ZICOND-NEXT:    czero.eqz a0, a0, a1
; RV64ZICOND-NEXT:    ret
entry:
  %and = and i64 %x, 2048
  %tobool.not = icmp eq i64 %and, 0
  %cond = select i1 %tobool.not, i64 0, i64 %x
  ret i64 %cond
}