author     James Y Knight <jyknight@google.com>    2023-12-18 16:51:06 -0500
committer  GitHub <noreply@github.com>             2023-12-18 16:51:06 -0500
commit     137f785fa6a1abb1651a603e3ce5b0e1f00e5be4
tree       cdd370b1ea7f0614211b2f76c57495fc264d1605
parent     83680f8c5388d76c3f5b15cc9ad565b28c86af35
[AMDGPU] Set MaxAtomicSizeInBitsSupported. (#75185)
This will result in atomic operations wider than 64 bits getting expanded to
`__atomic_*` libcalls via AtomicExpandPass, which matches what Clang
already does in the frontend.
While AMDGPU currently disables the use of all libcalls, I've changed it
to instead disable all of them _except_ the atomic ones. Those are
already emitted by the Clang frontend, and enabling them in the
backend allows the same expansion behavior there.
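To illustrate the new behavior, here is a hedged sketch (the function and
value names are hypothetical, not from this patch) of what AtomicExpandPass
does with an oversized atomic once the supported size is capped at 64 bits:

; Hypothetical input IR: a 16-byte atomic load, wider than the 64-bit
; maximum this patch sets for AMDGPU.
define i128 @oversized_load(ptr %p) {
  %v = load atomic i128, ptr %p seq_cst, align 16
  ret i128 %v
}

; After AtomicExpandPass, the load should become a call to the sized
; libatomic routine, roughly (5 encodes seq_cst ordering):
;   %v = call i128 @__atomic_load_16(ptr %p, i32 5)

The new test llvm/test/CodeGen/AMDGPU/atomic-oversize.ll below checks that
`__atomic_load_16`/`__atomic_store_16` show up in the generated code.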
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp                 | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic-oversize.ll                   | 10
-rw-r--r--  llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll  | 24

3 files changed, 28 insertions, 16 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 9d74430..156a264 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -506,9 +506,11 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::SELECT, MVT::v12f32, Promote);
   AddPromotedToType(ISD::SELECT, MVT::v12f32, MVT::v12i32);
 
-  // There are no libcalls of any kind.
-  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
-    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
+  // Disable most libcalls.
+  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I) {
+    if (I < RTLIB::ATOMIC_LOAD || I > RTLIB::ATOMIC_FETCH_NAND_16)
+      setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
+  }
 
   setSchedulingPreference(Sched::RegPressure);
   setJumpIsExpensive(true);
@@ -556,6 +558,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                        ISD::FSUB,      ISD::FNEG,
                        ISD::FABS,      ISD::AssertZext,
                        ISD::AssertSext, ISD::INTRINSIC_WO_CHAIN});
+
+  setMaxAtomicSizeInBitsSupported(64);
 }
 
 bool AMDGPUTargetLowering::mayIgnoreSignedZero(SDValue Op) const {
diff --git a/llvm/test/CodeGen/AMDGPU/atomic-oversize.ll b/llvm/test/CodeGen/AMDGPU/atomic-oversize.ll
new file mode 100644
index 0000000..f62a93f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomic-oversize.ll
@@ -0,0 +1,10 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
+
+define void @test(ptr %a) nounwind {
+; CHECK-LABEL: test:
+; CHECK: __atomic_load_16
+; CHECK: __atomic_store_16
+  %1 = load atomic i128, ptr %a seq_cst, align 16
+  store atomic i128 %1, ptr %a seq_cst, align 16
+  ret void
+}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
index bdfd90d..6c84474 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
@@ -1,15 +1,13 @@
-; RUN: not --crash opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s 2>&1 | FileCheck %s
-; The AtomicExpand pass cannot handle missing libcalls (yet) so reports a fatal error.
-; CHECK: LLVM ERROR: expandAtomicOpToLibcall shouldn't fail for Load
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s 2>&1 | FileCheck --check-prefix=GCN %s
 
 define i32 @atomic_load_global_align1(ptr addrspace(1) %ptr) {
 ; GCN-LABEL: @atomic_load_global_align1(
 ; GCN-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
-; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4
-; GCN-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP3]])
-; GCN-NEXT:    call void @0(i64 4, ptr [[TMP2]], ptr [[TMP3]], i32 5)
-; GCN-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4
-; GCN-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP3]])
+; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4, addrspace(5)
+; GCN-NEXT:    call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[TMP3]])
+; GCN-NEXT:    call void @__atomic_load(i64 4, ptr [[TMP2]], ptr addrspace(5) [[TMP3]], i32 5)
+; GCN-NEXT:    [[TMP5:%.*]] = load i32, ptr addrspace(5) [[TMP3]], align 4
+; GCN-NEXT:    call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[TMP3]])
 ; GCN-NEXT:    ret i32 [[TMP5]]
 ;
   %val = load atomic i32, ptr addrspace(1) %ptr seq_cst, align 1
@@ -19,11 +17,11 @@ define i32 @atomic_load_global_align1(ptr addrspace(1) %ptr) {
 define void @atomic_store_global_align1(ptr addrspace(1) %ptr, i32 %val) {
 ; GCN-LABEL: @atomic_store_global_align1(
 ; GCN-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
-; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4
-; GCN-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP3]])
-; GCN-NEXT:    store i32 [[VAL:%.*]], ptr [[TMP3]], align 4
-; GCN-NEXT:    call void @1(i64 4, ptr [[TMP2]], ptr [[TMP3]], i32 0)
-; GCN-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP3]])
+; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4, addrspace(5)
+; GCN-NEXT:    call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[TMP3]])
+; GCN-NEXT:    store i32 [[VAL:%.*]], ptr addrspace(5) [[TMP3]], align 4
+; GCN-NEXT:    call void @__atomic_store(i64 4, ptr [[TMP2]], ptr addrspace(5) [[TMP3]], i32 0)
+; GCN-NEXT:    call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[TMP3]])
 ; GCN-NEXT:    ret void
 ;
   store atomic i32 %val, ptr addrspace(1) %ptr monotonic, align 1