diff options
author | Craig Topper <craig.topper@sifive.com> | 2024-08-20 16:20:20 -0700 |
---|---|---|
committer | Craig Topper <craig.topper@sifive.com> | 2024-08-20 16:20:25 -0700 |
commit | a16f0dc9c2f0690e28622b0d80bd154fb0e6a30a (patch) | |
tree | 84d2b23071745317ad814f12c617db9beadd3db8 | |
parent | 4a4b233f35adaed44e50157db3846d0d23f2f6e1 (diff) | |
download | llvm-a16f0dc9c2f0690e28622b0d80bd154fb0e6a30a.zip llvm-a16f0dc9c2f0690e28622b0d80bd154fb0e6a30a.tar.gz llvm-a16f0dc9c2f0690e28622b0d80bd154fb0e6a30a.tar.bz2 |
[RISCV][GISel] Allow >2*XLen integers in isSupportedReturnType.
3 files changed, 117 insertions, 1 deletion
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp index f7fa0e1..f46264a 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp @@ -363,7 +363,7 @@ static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget, // TODO: Integers larger than 2*XLen are passed indirectly which is not // supported yet. if (T->isIntegerTy()) - return T->getIntegerBitWidth() <= Subtarget.getXLen() * 2; + return true; if (T->isHalfTy() || T->isFloatTy() || T->isDoubleTy()) return true; if (T->isPointerTy()) diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll index 5ca1bf7..57ff26a 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll @@ -886,6 +886,64 @@ define i32 @caller_small_scalar_ret() nounwind { ret i32 %3 } +; Check return of >2x xlen scalars + +define i128 @callee_large_scalar_ret() nounwind { + ; RV32I-LABEL: name: callee_large_scalar_ret + ; RV32I: bb.1 (%ir-block.0): + ; RV32I-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 1234567898765432123456789 + ; RV32I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0 + ; RV32I-NEXT: G_STORE [[C]](s128), [[FRAME_INDEX]](p0) :: (store (s128) into %stack.0, align 8) + ; RV32I-NEXT: $x10 = COPY [[FRAME_INDEX]](p0) + ; RV32I-NEXT: PseudoRET implicit $x10 + ret i128 1234567898765432123456789 +} + +define i32 @caller_large_scalar_ret() nounwind { + ; ILP32-LABEL: name: caller_large_scalar_ret + ; ILP32: bb.1 (%ir-block.0): + ; ILP32-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 9876543212345678987654321 + ; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 + ; ILP32-NEXT: PseudoCALL 
target-flags(riscv-call) @callee_large_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10 + ; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 + ; ILP32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10 + ; ILP32-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128), align 8) + ; ILP32-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s128), [[LOAD]] + ; ILP32-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1) + ; ILP32-NEXT: $x10 = COPY [[ZEXT]](s32) + ; ILP32-NEXT: PseudoRET implicit $x10 + ; + ; ILP32F-LABEL: name: caller_large_scalar_ret + ; ILP32F: bb.1 (%ir-block.0): + ; ILP32F-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 9876543212345678987654321 + ; ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 + ; ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10 + ; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 + ; ILP32F-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10 + ; ILP32F-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128), align 8) + ; ILP32F-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s128), [[LOAD]] + ; ILP32F-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1) + ; ILP32F-NEXT: $x10 = COPY [[ZEXT]](s32) + ; ILP32F-NEXT: PseudoRET implicit $x10 + ; + ; ILP32D-LABEL: name: caller_large_scalar_ret + ; ILP32D: bb.1 (%ir-block.0): + ; ILP32D-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 9876543212345678987654321 + ; ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 + ; ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_scalar_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10 + ; ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 + ; ILP32D-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10 + ; ILP32D-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128), align 8) + ; ILP32D-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), 
[[C]](s128), [[LOAD]] + ; ILP32D-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1) + ; ILP32D-NEXT: $x10 = COPY [[ZEXT]](s32) + ; ILP32D-NEXT: PseudoRET implicit $x10 + %1 = call i128 @callee_large_scalar_ret() + %2 = icmp eq i128 9876543212345678987654321, %1 + %3 = zext i1 %2 to i32 + ret i32 %3 +} + ; Check return of 2x xlen structs %struct.small = type { i32, ptr } diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll index 2499f8c..088cac9 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll @@ -541,6 +541,64 @@ define i64 @caller_small_scalar_ret() nounwind { ret i64 %3 } +; Check return of >2x xlen scalars + +define i256 @callee_large_scalar_ret() nounwind { + ; RV64I-LABEL: name: callee_large_scalar_ret + ; RV64I: bb.1 (%ir-block.0): + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s256) = G_CONSTANT i256 -1 + ; RV64I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0 + ; RV64I-NEXT: G_STORE [[C]](s256), [[FRAME_INDEX]](p0) :: (store (s256) into %stack.0, align 16) + ; RV64I-NEXT: $x10 = COPY [[FRAME_INDEX]](p0) + ; RV64I-NEXT: PseudoRET implicit $x10 + ret i256 -1 +} + +define i64 @caller_large_scalar_ret() nounwind { + ; LP64-LABEL: name: caller_large_scalar_ret + ; LP64: bb.1 (%ir-block.0): + ; LP64-NEXT: [[C:%[0-9]+]]:_(s256) = G_CONSTANT i256 -2 + ; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 + ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10 + ; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 + ; LP64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10 + ; LP64-NEXT: [[LOAD:%[0-9]+]]:_(s256) = G_LOAD [[COPY]](p0) :: (load (s256), align 16) + ; LP64-NEXT: [[ICMP:%[0-9]+]]:_(s1) = 
G_ICMP intpred(eq), [[C]](s256), [[LOAD]] + ; LP64-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1) + ; LP64-NEXT: $x10 = COPY [[ZEXT]](s64) + ; LP64-NEXT: PseudoRET implicit $x10 + ; + ; LP64F-LABEL: name: caller_large_scalar_ret + ; LP64F: bb.1 (%ir-block.0): + ; LP64F-NEXT: [[C:%[0-9]+]]:_(s256) = G_CONSTANT i256 -2 + ; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 + ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10 + ; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 + ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10 + ; LP64F-NEXT: [[LOAD:%[0-9]+]]:_(s256) = G_LOAD [[COPY]](p0) :: (load (s256), align 16) + ; LP64F-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s256), [[LOAD]] + ; LP64F-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1) + ; LP64F-NEXT: $x10 = COPY [[ZEXT]](s64) + ; LP64F-NEXT: PseudoRET implicit $x10 + ; + ; LP64D-LABEL: name: caller_large_scalar_ret + ; LP64D: bb.1 (%ir-block.0): + ; LP64D-NEXT: [[C:%[0-9]+]]:_(s256) = G_CONSTANT i256 -2 + ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 + ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10 + ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 + ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10 + ; LP64D-NEXT: [[LOAD:%[0-9]+]]:_(s256) = G_LOAD [[COPY]](p0) :: (load (s256), align 16) + ; LP64D-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s256), [[LOAD]] + ; LP64D-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1) + ; LP64D-NEXT: $x10 = COPY [[ZEXT]](s64) + ; LP64D-NEXT: PseudoRET implicit $x10 + %1 = call i256 @callee_small_scalar_ret() + %2 = icmp eq i256 -2, %1 + %3 = zext i1 %2 to i64 + ret i64 %3 +} + ; Check return of 2x xlen structs %struct.small = type { i64, ptr } |