diff options
author | Hank Chang <hank.chang@sifive.com> | 2025-03-13 15:07:07 +0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2025-03-13 15:07:07 +0800 |
commit | bd748b33958f8889d280afd4396b189edd0745bf (patch) | |
tree | dcf6085c88ff1dfa9447f69dc1227d4e156a8c46 /llvm | |
parent | 6345b009c3e58a6cd0eca835d5a935f8784cfda6 (diff) | |
download | llvm-bd748b33958f8889d280afd4396b189edd0745bf.zip llvm-bd748b33958f8889d280afd4396b189edd0745bf.tar.gz llvm-bd748b33958f8889d280afd4396b189edd0745bf.tar.bz2 |
[RISCV] Add implicit operand {VL, VTYPE} in RISCVInsertVSETVLI when using inline assembly. (#130733)
Fixes [#128636](https://github.com/llvm/llvm-project/pull/128636).
This patch makes RISCVInsertVSETVLI add implicit VL and VTYPE operands to
inline-assembly instructions; this approach was suggested by @preames and
the implementation I referenced is from @topperc. The purpose of adding the
vl and vtype implicit operands is to prevent the post-RA scheduler from
moving a vsetvli across the inline assembly.
Diffstat (limited to 'llvm')
-rw-r--r-- | llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 7 | ||||
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll | 28 |
2 files changed, 35 insertions, 0 deletions
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index 7433603..2247610 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -1531,6 +1531,13 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) { /*isImp*/ true)); } + if (MI.isInlineAsm()) { + MI.addOperand(MachineOperand::CreateReg(RISCV::VL, /*isDef*/ true, + /*isImp*/ true)); + MI.addOperand(MachineOperand::CreateReg(RISCV::VTYPE, /*isDef*/ true, + /*isImp*/ true)); + } + if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) || MI.modifiesRegister(RISCV::VTYPE, /*TRI=*/nullptr)) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll new file mode 100644 index 0000000..f2b566e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-cross-inline-asm.ll @@ -0,0 +1,28 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -verify-machineinstrs < %s | FileCheck %s + +define void @foo(<vscale x 8 x half> %0) { +; CHECK-LABEL: foo: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: lui a0, 1 +; CHECK-NEXT: addiw a0, a0, -1096 +; CHECK-NEXT: vmv.v.i v11, 0 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: #APP +; CHECK-NEXT: vfmadd.vv v11, v10, v10 +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: #APP +; CHECK-NEXT: vfmadd.vv v11, v10, v10 +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vse16.v v8, (zero) +; CHECK-NEXT: ret +entry: + %2 = tail call i64 @llvm.riscv.vsetvli.i64(i64 3000, i64 0, i64 0) + %3 = tail call <vscale x 2 x float> asm sideeffect "vfmadd.vv $0, $1, $2", "=^vr,^vr,^vr,0"(<vscale x 2 x float> zeroinitializer, <vscale x 2 x float> zeroinitializer, <vscale x 2 x float> 
zeroinitializer) + %4 = tail call <vscale x 2 x float> asm sideeffect "vfmadd.vv $0, $1, $2", "=^vr,^vr,^vr,0"(<vscale x 2 x float> zeroinitializer, <vscale x 2 x float> zeroinitializer, <vscale x 2 x float> %3) + tail call void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half> %0, ptr null, i64 %2) + ret void +} |