author     Matt Arsenault <Matthew.Arsenault@amd.com>   2019-06-05 21:02:10 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>   2019-06-05 21:02:10 +0000
commit     c8af2415480e5562c953b2ed76584e60f55a660f (patch)
tree       93a54d8aa441db18bbfa5fc9556f75fcbab67618
parent     b73bafaff701aaabc18fe3d28a6afecb211cf485 (diff)
Merging r359898:
------------------------------------------------------------------------
r359898 | arsenm | 2019-05-03 08:21:53 -0700 (Fri, 03 May 2019) | 3 lines

AMDGPU: Support shrinking add with FI in SIFoldOperands

Avoids test regression in a future patch
------------------------------------------------------------------------

llvm-svn: 362648
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFoldOperands.cpp              72
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir   230
2 files changed, 267 insertions, 35 deletions
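
In short, the change below widens the shrink-to-VOP2 path in SIFoldOperands' updateOperand() so that frame-index fold candidates are handled the same way as immediates, while keeping the bail-out when VCC is not dead at the add. The following is a minimal, self-contained C++ sketch of just that decision; OperandKind, FoldCandidate, and triesShrink are simplified stand-ins for illustration, not the real LLVM classes:

// Standalone sketch (simplified stand-ins, not the real LLVM classes) of the
// decision updateOperand() now makes: a fold candidate that is either an
// immediate or a frame index may take the shrink-to-VOP2 path, and the
// rewrite is still abandoned unless VCC is dead at the instruction.
#include <iostream>

enum class OperandKind { Register, Immediate, FrameIndex };

struct FoldCandidate {
  OperandKind Kind;
  bool ShrinkRequested; // set when the fold only works in the e32 encoding
  bool isImm() const { return Kind == OperandKind::Immediate; }
  bool isFI() const { return Kind == OperandKind::FrameIndex; }
  bool needsShrink() const { return ShrinkRequested; }
};

// Models the widened guard; VccIsDead stands in for
// computeRegisterLiveness(&TRI, AMDGPU::VCC, MI) == LQR_Dead.
bool triesShrink(const FoldCandidate &Fold, bool VccIsDead) {
  if ((Fold.isImm() || Fold.isFI()) && Fold.needsShrink())
    return VccIsDead;
  return false;
}

int main() {
  FoldCandidate FI{OperandKind::FrameIndex, /*ShrinkRequested=*/true};
  FoldCandidate Imm{OperandKind::Immediate, /*ShrinkRequested=*/true};
  std::cout << triesShrink(FI, /*VccIsDead=*/true) << '\n';  // 1: FI now shrinks
  std::cout << triesShrink(FI, /*VccIsDead=*/false) << '\n'; // 0: liveness bail-out
  std::cout << triesShrink(Imm, /*VccIsDead=*/true) << '\n'; // 1: unchanged for imms
}

The liveness check matters because the e32 form writes its carry-out to the implicit $vcc (visible as implicit-def $vcc in the CHECK lines of the new test), so the rewrite is only legal when VCC is otherwise dead at that point.
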
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 50a109d..d679abd 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -201,53 +201,55 @@ static bool updateOperand(FoldCandidate &Fold,
Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
}
}
+ }
- if (Fold.needsShrink()) {
- MachineBasicBlock *MBB = MI->getParent();
- auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
- if (Liveness != MachineBasicBlock::LQR_Dead)
- return false;
+ if ((Fold.isImm() || Fold.isFI()) && Fold.needsShrink()) {
+ MachineBasicBlock *MBB = MI->getParent();
+ auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
+ if (Liveness != MachineBasicBlock::LQR_Dead)
+ return false;
- MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- int Op32 = Fold.getShrinkOpcode();
- MachineOperand &Dst0 = MI->getOperand(0);
- MachineOperand &Dst1 = MI->getOperand(1);
- assert(Dst0.isDef() && Dst1.isDef());
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ int Op32 = Fold.getShrinkOpcode();
+ MachineOperand &Dst0 = MI->getOperand(0);
+ MachineOperand &Dst1 = MI->getOperand(1);
+ assert(Dst0.isDef() && Dst1.isDef());
- bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
+ bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
- const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
- unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
+ const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
+ unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
- MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
+ MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
- if (HaveNonDbgCarryUse) {
- BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
- .addReg(AMDGPU::VCC, RegState::Kill);
- }
-
- // Keep the old instruction around to avoid breaking iterators, but
- // replace it with a dummy instruction to remove uses.
- //
- // FIXME: We should not invert how this pass looks at operands to avoid
- // this. Should track set of foldable movs instead of looking for uses
- // when looking at a use.
- Dst0.setReg(NewReg0);
- for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
- MI->RemoveOperand(I);
- MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
-
- if (Fold.isCommuted())
- TII.commuteInstruction(*Inst32, false);
- return true;
+ if (HaveNonDbgCarryUse) {
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
+ .addReg(AMDGPU::VCC, RegState::Kill);
}
- Old.ChangeToImmediate(Fold.ImmToFold);
+ // Keep the old instruction around to avoid breaking iterators, but
+ // replace it with a dummy instruction to remove uses.
+ //
+ // FIXME: We should not invert how this pass looks at operands to avoid
+ // this. Should track set of foldable movs instead of looking for uses
+ // when looking at a use.
+ Dst0.setReg(NewReg0);
+ for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
+ MI->RemoveOperand(I);
+ MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
+
+ if (Fold.isCommuted())
+ TII.commuteInstruction(*Inst32, false);
return true;
}
assert(!Fold.needsShrink() && "not handled");
+ if (Fold.isImm()) {
+ Old.ChangeToImmediate(Fold.ImmToFold);
+ return true;
+ }
+
if (Fold.isFI()) {
Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
return true;
@@ -348,7 +350,7 @@ static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
if ((Opc == AMDGPU::V_ADD_I32_e64 ||
Opc == AMDGPU::V_SUB_I32_e64 ||
Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
- OpToFold->isImm()) {
+ (OpToFold->isImm() || OpToFold->isFI())) {
MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
// Verify the other operand is a VGPR, otherwise we would violate the
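
The hunk above in tryAddToFoldList() is what feeds that path: for the carry-writing V_ADD_I32_e64 / V_SUB_I32_e64 / V_SUBREV_I32_e64 opcodes, frame-index operands may now enter the fold list alongside immediates. A small self-contained sketch of the widened guard, again with illustrative stand-ins (string opcodes and a toy Operand type) rather than the real MachineInstr/MachineOperand API:

// Illustrative sketch of the widened tryAddToFoldList() guard; the string
// opcodes and Operand type are stand-ins for the real MIR representation.
#include <cassert>
#include <string>

enum class OperandKind { Register, Immediate, FrameIndex };

struct Operand {
  OperandKind Kind;
  bool isImm() const { return Kind == OperandKind::Immediate; }
  bool isFI() const { return Kind == OperandKind::FrameIndex; }
};

// Before this patch only isImm() was accepted here; FI operands were skipped.
bool consideredForFolding(const std::string &Opc, const Operand &OpToFold) {
  bool IsCarryAddSub = Opc == "V_ADD_I32_e64" || Opc == "V_SUB_I32_e64" ||
                       Opc == "V_SUBREV_I32_e64";
  return IsCarryAddSub && (OpToFold.isImm() || OpToFold.isFI());
}

int main() {
  Operand FI{OperandKind::FrameIndex};
  Operand Reg{OperandKind::Register};
  assert(consideredForFolding("V_ADD_I32_e64", FI));   // frame index now accepted
  assert(!consideredForFolding("V_ADD_I32_e64", Reg)); // plain registers still are not
  return 0;
}
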
diff --git a/llvm/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir b/llvm/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir
new file mode 100644
index 0000000..ab54466
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir
@@ -0,0 +1,230 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -run-pass si-fold-operands,dead-mi-elimination %s -o - | FileCheck -check-prefix=GCN %s
+
+---
+
+# First operand is an FI in a VGPR, other operand is a VGPR
+name: shrink_vgpr_fi_vgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: shrink_vgpr_fi_vgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[V_MOV_B32_e32_]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %1:vgpr_32 = COPY $vgpr0
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is a VGPR, the other operand is an FI in a VGPR
+name: shrink_vgpr_vgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: shrink_vgpr_vgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[COPY]], [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an FI in a VGPR, the other operand is an SGPR
+name: shrink_vgpr_fi_sgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $sgpr0
+
+ ; GCN-LABEL: name: shrink_vgpr_fi_sgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $sgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_I32_e64 [[COPY]], [[V_MOV_B32_e32_]], implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e64_]]
+ %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %1:sreg_32_xm0 = COPY $sgpr0
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an SGPR, the other operand is an FI in a VGPR
+name: shrink_sgpr_vgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $sgpr0
+
+ ; GCN-LABEL: name: shrink_sgpr_vgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $sgpr0
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_I32_e64 [[V_MOV_B32_e32_]], [[COPY]], implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e64_]]
+ %0:sreg_32_xm0 = COPY $sgpr0
+ %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an FI in an SGPR, other operand is a VGPR
+name: shrink_sgpr_fi_vgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: shrink_sgpr_fi_vgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 %stack.0
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[S_MOV_B32_]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:sreg_32_xm0 = S_MOV_B32 %stack.0
+ %1:vgpr_32 = COPY $vgpr0
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is a VGPR, the other operand is an FI in an SGPR
+name: shrink_vgpr_sgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: shrink_vgpr_sgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: liveins: $vgpr0
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 %stack.0
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 [[S_MOV_B32_]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:sreg_32_xm0 = S_MOV_B32 %stack.0
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an FI in a VGPR, other operand is an inline imm in a VGPR
+name: shrink_vgpr_imm_fi_vgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: shrink_vgpr_imm_fi_vgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 16, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 16, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an inline imm in a VGPR, the other operand is an FI in a VGPR
+name: shrink_vgpr_imm_vgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: shrink_vgpr_imm_vgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_I32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_I32_e64 16, [[V_MOV_B32_e32_]], implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e64_]]
+ %0:vgpr_32 = V_MOV_B32_e32 16, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is an FI in a VGPR, other operand is a literal constant in a VGPR
+name: shrink_vgpr_k_fi_vgpr_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: shrink_vgpr_k_fi_vgpr_v_add_i32_e64_no_carry_out_use
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 1234, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 1234, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...
+
+---
+
+# First operand is a literal constant in a VGPR, the other operand is an FI in a VGPR
+name: shrink_vgpr_k_vgpr_fi_v_add_i32_e64_no_carry_out_use
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, offset: 0, size: 64, alignment: 16 }
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: shrink_vgpr_k_vgpr_fi_v_add_i32_e64_no_carry_out_use
+ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1234, implicit $exec
+ ; GCN: [[V_ADD_I32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_I32_e32 %stack.0, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; GCN: S_ENDPGM implicit [[V_ADD_I32_e32_]]
+ %0:vgpr_32 = V_MOV_B32_e32 1234, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
+ %2:vgpr_32, %3:sreg_64 = V_ADD_I32_e64 %0, %1, implicit $exec
+ S_ENDPGM implicit %2
+
+...