aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAmara Emerson <aemerson@apple.com>2019-08-14 21:30:30 +0000
committerAmara Emerson <aemerson@apple.com>2019-08-14 21:30:30 +0000
commit1222cfd5fee3db3d78f38cdebf11e67baffa1989 (patch)
treeb92c8044fefc0ce64d773621efbe7f3f8936e9f8
parentdef9928204e76b33cac8a98d54e995b7a6bb145d (diff)
downloadllvm-1222cfd5fee3db3d78f38cdebf11e67baffa1989.zip
llvm-1222cfd5fee3db3d78f38cdebf11e67baffa1989.tar.gz
llvm-1222cfd5fee3db3d78f38cdebf11e67baffa1989.tar.bz2
[AArch64][GlobalISel] Custom selection for s8 load acquire.
Implement this single atomic load instruction so that we can compile stack
protector code.

Differential Revision: https://reviews.llvm.org/D66245

llvm-svn: 368923
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp9
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir37
2 files changed, 45 insertions, 1 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index f594200..88d522e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -1740,7 +1740,14 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
auto &MemOp = **I.memoperands_begin();
if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
- LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+ // For now we just support s8 acquire loads to be able to compile stack
+ // protector code.
+ if (MemOp.getOrdering() == AtomicOrdering::Acquire &&
+ MemOp.getSize() == 1) {
+ I.setDesc(TII.get(AArch64::LDARB));
+ return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+ }
+ LLVM_DEBUG(dbgs() << "Atomic load/store not fully supported yet\n");
return false;
}
unsigned MemSizeInBits = MemOp.getSize() * 8;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
new file mode 100644
index 0000000..516e0fb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
@@ -0,0 +1,37 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+ target triple = "aarch64"
+
+ define i8 @load_acq_i8(i8* %ptr) {
+ %v = load atomic i8, i8* %ptr acquire, align 8
+ ret i8 %v
+ }
+
+...
+---
+name: load_acq_i8
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+ - { reg: '$x0' }
+machineFunctionInfo: {}
+body: |
+ bb.1:
+ liveins: $x0
+
+ ; CHECK-LABEL: name: load_acq_i8
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK: [[LDARB:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load acquire 1 from %ir.ptr, align 8)
+ ; CHECK: $w0 = COPY [[LDARB]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(p0) = COPY $x0
+ %2:gpr(s32) = G_LOAD %0(p0) :: (load acquire 1 from %ir.ptr, align 8)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...