author    Bradley Smith <bradley.smith@arm.com>    2016-01-15 10:26:17 +0000
committer Bradley Smith <bradley.smith@arm.com>    2016-01-15 10:26:17 +0000
commit    a1189106d5a1b9e9ff57ea6fa53c24e891f1d09c
tree      8b948d68b54961bb5d8084f27d023833c2dbd172
parent    519563e371e6d4294b1a62ffe444bf68d6267cc8
[ARM] Add B.W and CBZ instructions to ARMv8-M Baseline
llvm-svn: 257881
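
ARMv8-M Baseline provides the 32-bit B.W branch encoding and the CBZ/CBNZ compare-and-branch instructions, so the changes below stop gating them on Thumb2. A minimal sketch of what now assembles (and can be relaxed) for -triple thumbv8m.base-none-eabi; label and register choices are illustrative only:

    cbz   r3, 1f        @ 16-bit compare-with-zero-and-branch
    cbnz  r3, 1f        @ 16-bit compare-with-nonzero-and-branch
    b.w   1f            @ 32-bit unconditional branch, roughly +/-16 MB reach
1:
    nop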
 llvm/lib/Target/ARM/ARMConstantIslandPass.cpp      | 12
 llvm/lib/Target/ARM/ARMInstrThumb2.td              |  7
 llvm/lib/Target/ARM/ARMSubtarget.cpp                |  2
 llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp      |  7
 llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp  |  6
 llvm/test/CodeGen/ARM/fast-tail-call.ll             |  1
 llvm/test/MC/ARM/thumb1-relax-8m-baseline.s         | 10
 llvm/test/MC/ARM/thumbv8m.s                         | 12
 8 files changed, 46 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 55c1684..df168c7 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -478,10 +478,18 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
MadeChange = true;
}
- // Shrink 32-bit Thumb2 branch, load, and store instructions.
+ // Shrink 32-bit Thumb2 load and store instructions.
if (isThumb2 && !STI->prefers32BitThumb())
MadeChange |= optimizeThumb2Instructions();
+ // Shrink 32-bit branch instructions.
+ if (isThumb && STI->hasV8MBaselineOps())
+ MadeChange |= optimizeThumb2Branches();
+
+ // Optimize jump tables using TBB / TBH.
+ if (isThumb2)
+ MadeChange |= optimizeThumb2JumpTables();
+
// After a while, this might be made debug-only, but it is not expensive.
verify();
@@ -1852,8 +1860,6 @@ bool ARMConstantIslands::optimizeThumb2Instructions() {
}
}
- MadeChange |= optimizeThumb2Branches();
- MadeChange |= optimizeThumb2JumpTables();
return MadeChange;
}
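
With branch shrinking split out of optimizeThumb2Instructions and gated on HasV8MBaselineOps, optimizeThumb2Branches now also runs for ARMv8-M Baseline. A rough sketch (register and label names assumed, not taken from real output) of one rewrite that pass can perform, folding a compare against zero plus a short conditional branch into a single CBZ:

    @ before
    cmp   r0, #0
    beq   .LBB0_2

    @ after
    cbz   r0, .LBB0_2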
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 568cbfc..ddc1495 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -3530,7 +3530,8 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
let isPredicable = 1 in
def t2B : T2I<(outs), (ins uncondbrtarget:$target), IIC_Br,
"b", ".w\t$target",
- [(br bb:$target)]>, Sched<[WriteBr]> {
+ [(br bb:$target)]>, Sched<[WriteBr]>,
+ Requires<[IsThumb, HasV8MBaseline]> {
let Inst{31-27} = 0b11110;
let Inst{15-14} = 0b10;
let Inst{12} = 1;
@@ -3661,7 +3662,7 @@ let isBranch = 1, isTerminator = 1 in {
def tCBZ : T1I<(outs), (ins tGPR:$Rn, t_cbtarget:$target), IIC_Br,
"cbz\t$Rn, $target", []>,
T1Misc<{0,0,?,1,?,?,?}>,
- Requires<[IsThumb2]>, Sched<[WriteBr]> {
+ Requires<[IsThumb, HasV8MBaseline]>, Sched<[WriteBr]> {
// A8.6.27
bits<6> target;
bits<3> Rn;
@@ -3673,7 +3674,7 @@ let isBranch = 1, isTerminator = 1 in {
def tCBNZ : T1I<(outs), (ins tGPR:$Rn, t_cbtarget:$target), IIC_Br,
"cbnz\t$Rn, $target", []>,
T1Misc<{1,0,?,1,?,?,?}>,
- Requires<[IsThumb2]>, Sched<[WriteBr]> {
+ Requires<[IsThumb, HasV8MBaseline]>, Sched<[WriteBr]> {
// A8.6.27
bits<6> target;
bits<3> Rn;
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 2a94116..44bba0c 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -234,7 +234,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
// registers are the 4 used for parameters. We don't currently do this
// case.
- SupportsTailCall = !isThumb1Only();
+ SupportsTailCall = !isThumb() || hasV8MBaselineOps();
if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(5, 0))
SupportsTailCall = false;
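
SupportsTailCall now also holds for Thumb-only targets that have the v8-M Baseline ops, since B.W gives a tail-call branch enough reach. A hand-written illustration of the difference (the callee name is made up; this is not generated compiler output):

    @ call in tail position emitted as a plain branch: no link, no separate return
    b.w   callee

    @ versus an ordinary call followed by a return
    bl    callee
    bx    lr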
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index c69a741..5a63f04 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -269,6 +269,9 @@ class ARMAsmParser : public MCTargetAsmParser {
bool hasV8Ops() const {
return getSTI().getFeatureBits()[ARM::HasV8Ops];
}
+ bool hasV8MBaseline() const {
+ return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
+ }
bool hasARM() const {
return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
}
@@ -4673,14 +4676,14 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
// classify tB as either t2B or t1B based on range of immediate operand
case ARM::tB: {
ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
- if (!op.isSignedOffset<11, 1>() && isThumbTwo())
+ if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
Inst.setOpcode(ARM::t2B);
break;
}
// classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
case ARM::tBcc: {
ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
- if (!op.isSignedOffset<8, 1>() && isThumbTwo())
+ if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
Inst.setOpcode(ARM::t2Bcc);
break;
}
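
With this parser change, a Thumb branch whose displacement is a constant known at parse time but outside the 16-bit encoding's range is promoted to the wide encoding whenever v8-M Baseline is available, not only on Thumb2. A small assumed example (the offset is illustrative); the 16-bit conditional form only reaches about +/-256 bytes, so this needs the 32-bit encoding:

    beq   #300        @ out of tBcc range; selected as t2Bcc on
                      @ thumbv8m.base (previously required Thumb2)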
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index fa52c93..6101495 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -159,6 +159,7 @@ void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
bool HasThumb2 = STI->getFeatureBits()[ARM::FeatureThumb2];
+ bool HasV8MBaselineOps = STI->getFeatureBits()[ARM::HasV8MBaselineOps];
switch (Op) {
default:
@@ -170,7 +171,7 @@ unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
case ARM::tADR:
return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
case ARM::tB:
- return HasThumb2 ? (unsigned)ARM::t2B : Op;
+ return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
case ARM::tCBZ:
return ARM::tHINT;
case ARM::tCBNZ:
@@ -563,7 +564,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
}
case ARM::fixup_arm_thumb_br:
// Offset by 4 and don't encode the lower bit, which is always 0.
- if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2]) {
+ if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2] &&
+ !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
if (FixupDiagnostic) {
Ctx->reportError(Fixup.getLoc(), FixupDiagnostic);
diff --git a/llvm/test/CodeGen/ARM/fast-tail-call.ll b/llvm/test/CodeGen/ARM/fast-tail-call.ll
index 6472016..c93028b 100644
--- a/llvm/test/CodeGen/ARM/fast-tail-call.ll
+++ b/llvm/test/CodeGen/ARM/fast-tail-call.ll
@@ -1,4 +1,5 @@
; RUN: llc -mtriple=thumbv7-linux-gnueabi -O0 < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv8m.base-arm-none-eabi -filetype=obj < %s
; Primarily a non-crash test: Thumbv7 Linux does not have FastISel support,
; which led (via a convoluted route) to DAG nodes after a TC_RETURN that
diff --git a/llvm/test/MC/ARM/thumb1-relax-8m-baseline.s b/llvm/test/MC/ARM/thumb1-relax-8m-baseline.s
new file mode 100644
index 0000000..e9c88bb
--- /dev/null
+++ b/llvm/test/MC/ARM/thumb1-relax-8m-baseline.s
@@ -0,0 +1,10 @@
+@ RUN: not llvm-mc -triple thumbv6m-none-eabi -filetype=obj -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=CHECK-V6M
+@ RUN: llvm-mc -triple thumbv8m.base-none-eabi -filetype=obj -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=CHECK-V8MBASE --allow-empty
+
+@ CHECK-V8MBASE-NOT: out of range pc-relative fixup value
+@ CHECK-V6M: out of range pc-relative fixup value
+ b Lfar2
+
+ .space 2050
+Lfar2:
+ .word 42
diff --git a/llvm/test/MC/ARM/thumbv8m.s b/llvm/test/MC/ARM/thumbv8m.s
index f61d00f..41f6dc9 100644
--- a/llvm/test/MC/ARM/thumbv8m.s
+++ b/llvm/test/MC/ARM/thumbv8m.s
@@ -23,6 +23,18 @@ isb sy
// 'Code optimization'
+// CHECK: cbz r3, .Ltmp0 @ encoding: [0x03'A',0xb1'A']
+// CHECK-NEXT: @ fixup A - offset: 0, value: .Ltmp0, kind: fixup_arm_thumb_cb
+cbz r3, 1f
+
+// CHECK: cbnz r3, .Ltmp0 @ encoding: [0x03'A',0xb9'A']
+// CHECK-NEXT: @ fixup A - offset: 0, value: .Ltmp0, kind: fixup_arm_thumb_cb
+cbnz r3, 1f
+
+// CHECK: b.w .Ltmp0 @ encoding: [A,0xf0'A',A,0x90'A']
+// CHECK-NEXT: @ fixup A - offset: 0, value: .Ltmp0, kind: fixup_t2_uncondbranch
+b.w 1f
+
// CHECK: sdiv r1, r2, r3 @ encoding: [0x92,0xfb,0xf3,0xf1]
sdiv r1, r2, r3