| author | Konstantin Zhuravlyov <kzhuravl_dev@outlook.com> | 2016-06-25 03:11:28 +0000 |
|---|---|---|
| committer | Konstantin Zhuravlyov <kzhuravl_dev@outlook.com> | 2016-06-25 03:11:28 +0000 |
| commit | f2f3d14774e6019048dccd1ecbca340ae5bbfca1 (patch) | |
| tree | 65f692e2c2223031bede6426b5f4d1e6646d5655 /llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | |
| parent | 92d33bd2af3452b7272f8d27a75c0e344ae20f38 (diff) | |
[AMDGPU] Emit debugger prologue and emit the rest of the debugger fields in the kernel code header
The debugger prologue is emitted if -mattr=+amdgpu-debugger-emit-prologue is specified.
The debugger prologue writes the work group IDs and work item IDs to scratch memory at a fixed location, in the following format (a layout sketch follows this list):
- offset 0: work group ID x
- offset 4: work group ID y
- offset 8: work group ID z
- offset 16: work item ID x
- offset 20: work item ID y
- offset 24: work item ID z
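
A minimal C++ sketch of that fixed layout, for illustration only: the struct name, field names, and the explicit reserved member are assumptions; only the byte offsets come from the list above.

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical view of the debugger area at its fixed location in scratch
// memory; note the 4-byte gap at offset 12 implied by the offsets above.
struct DebuggerScratchLayout {
  uint32_t WorkGroupIDX;  // offset 0
  uint32_t WorkGroupIDY;  // offset 4
  uint32_t WorkGroupIDZ;  // offset 8
  uint32_t Reserved;      // offset 12 (unused)
  uint32_t WorkItemIDX;   // offset 16
  uint32_t WorkItemIDY;   // offset 20
  uint32_t WorkItemIDZ;   // offset 24
};

static_assert(offsetof(DebuggerScratchLayout, WorkItemIDZ) == 24,
              "layout must match the offsets documented above");
```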
Set the following amd_kernel_code_t fields (a rough sketch of the resulting header setup follows this list):
- amd_kernel_code_t::debug_wavefront_private_segment_offset_sgpr to the scratch wave offset register
- amd_kernel_code_t::debug_private_segment_buffer_sgpr to the scratch rsrc register
- amd_kernel_code_t::is_debug_supported to true if all debugger features are enabled
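
A rough sketch of how these fields might be filled in when the kernel code header is emitted. This is illustrative only: the helper below is not the actual emission code, and the exact representation of is_debug_supported (shown here as a code_properties mask bit) is an assumption about AMDKernelCodeT.h rather than something stated by this commit.

```cpp
#include "AMDKernelCodeT.h"  // assumed to provide amd_kernel_code_t and the code-property masks

// Illustrative helper; mirrors the commit message, not the real asm-printer code.
static void setDebuggerCodeHeaderFields(amd_kernel_code_t &Header,
                                        unsigned ScratchWaveOffsetSGPR,
                                        unsigned ScratchRsrcSGPR,
                                        bool AllDebuggerFeaturesEnabled) {
  Header.debug_wavefront_private_segment_offset_sgpr = ScratchWaveOffsetSGPR;
  Header.debug_private_segment_buffer_sgpr = ScratchRsrcSGPR;
  if (AllDebuggerFeaturesEnabled)
    Header.code_properties |= AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED; // "is_debug_supported" (mask name assumed)
}
```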
Differential Revision: http://reviews.llvm.org/D20335
llvm-svn: 273769
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIFrameLowering.cpp')
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | 50 |
|---|---|---|

1 file changed, 49 insertions, 1 deletion
```diff
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index 1f3b361..03b11f0 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -39,6 +39,12 @@ static ArrayRef<MCPhysReg> getAllSGPRs() {
 
 void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
+  // Emit debugger prologue if "amdgpu-debugger-emit-prologue" attribute was
+  // specified.
+  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+  if (ST.debuggerEmitPrologue())
+    emitDebuggerPrologue(MF, MBB);
+
   if (!MF.getFrameInfo()->hasStackObjects())
     return;
 
@@ -54,7 +60,6 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
   if (hasOnlySGPRSpills(MFI, MF.getFrameInfo()))
     return;
 
-  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
   const SIInstrInfo *TII = ST.getInstrInfo();
   const SIRegisterInfo *TRI = &TII->getRegisterInfo();
   MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -87,6 +92,8 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
   // pointer. Because we only detect if flat instructions are used at all,
   // this will be used more often than necessary on VI.
 
+  // Debug location must be unknown since the first debug location is used to
+  // determine the end of the prologue.
   DebugLoc DL;
 
   unsigned FlatScratchInitReg
@@ -289,3 +296,44 @@ void SIFrameLowering::processFunctionBeforeFrameFinalized(
     RS->addScavengingFrameIndex(ScavengeFI);
   }
 }
+
+void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
+                                           MachineBasicBlock &MBB) const {
+  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+  const SIInstrInfo *TII = ST.getInstrInfo();
+  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
+  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+
+  MachineBasicBlock::iterator I = MBB.begin();
+  DebugLoc DL;
+
+  // For each dimension:
+  for (unsigned i = 0; i < 3; ++i) {
+    // Get work group ID SGPR, and make it live-in again.
+    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
+    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
+    MBB.addLiveIn(WorkGroupIDSGPR);
+
+    // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
+    // order to spill it to scratch.
+    unsigned WorkGroupIDVGPR =
+      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
+      .addReg(WorkGroupIDSGPR);
+
+    // Spill work group ID.
+    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
+    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
+                             WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
+
+    // Get work item ID VGPR, and make it live-in again.
+    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
+    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
+    MBB.addLiveIn(WorkItemIDVGPR);
+
+    // Spill work item ID.
+    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
+    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
+                             WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
+  }
+}
```
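
For reference, a minimal usage sketch: the feature is toggled through the subtarget attribute named in the commit message, so an llc invocation along these lines would exercise the new prologue (the input file name and the -mcpu choice are placeholders, not part of this commit):

```
llc -march=amdgcn -mcpu=fiji -mattr=+amdgpu-debugger-emit-prologue kernel.ll -o kernel.s
```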
