path: root/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
author    Oliver Stannard <oliver.stannard@arm.com>  2015-12-16 11:35:44 +0000
committer Oliver Stannard <oliver.stannard@arm.com>  2015-12-16 11:35:44 +0000
commit    48568cbe18020b64d2cce24b0804fd36295738d5 (patch)
tree      e46534ec0816198d48ed1d3932ddf625c86b8f90 /llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
parent    e75e6e2a2393f9236fc5d492233dc1d8633c2691 (diff)
[ARM] Add ARMv8.2-A FP16 scalar instructions
ARMv8.2-A adds 16-bit floating point versions of all existing VFP floating-point instructions. This is an optional extension, so all of these instructions require the FeatureFullFP16 subtarget feature.

The assembly for these instructions uses S registers (AArch32 does not have H registers), but the instructions have ".f16" type specifiers rather than ".f32" or ".f64". The top 16 bits of each source register are ignored, and the top 16 bits of the destination register are set to zero.

These instructions are mostly the same as the 32- and 64-bit versions, but they use coprocessor 9 rather than 10 and 11.

Two new instructions, VMOVX and VINS, have been added to allow packing and extracting two 16-bit floats stored in the top and bottom halves of an S register.

New fixup kinds have been added for the PC-relative load and store instructions, but no ELF relocations have been added as they have a range of 512 bytes.

Differential Revision: http://reviews.llvm.org/D15038

llvm-svn: 255762
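To illustrate the syntax this patch teaches the assembler (the registers and offsets below are invented for the example, not taken from the patch's tests):

    vadd.f16  s0, s1, s2      @ scalar FP16 arithmetic: S registers with a .f16 type specifier
    vmul.f16  s3, s4, s5      @ FP16 versions of the existing VFP instructions, gated on FeatureFullFP16
    vldr.16   s0, [r0, #8]    @ half-precision load/store
    vmovx.f16 s0, s1          @ new: extract the FP16 value held in the top half of s1
    vins.f16  s0, s1          @ new: insert the bottom half of s1 into the top half of s0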
Diffstat (limited to 'llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp')
-rw-r--r--  llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp  44
1 file changed, 41 insertions, 3 deletions
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 73f3308..72c98f0 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -1183,6 +1183,20 @@ public:
return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
Val == INT32_MIN;
}
+ bool isAddrMode5FP16() const {
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm() && !isa<MCConstantExpr>(getImm()))
+ return true;
+ if (!isMem() || Memory.Alignment != 0) return false;
+ // Check for register offset.
+ if (Memory.OffsetRegNum) return false;
+ // Immediate offset in range [-510, 510] and a multiple of 2.
+ if (!Memory.OffsetImm) return true;
+ int64_t Val = Memory.OffsetImm->getValue();
+ return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) || Val == INT32_MIN;
+ }
bool isMemTBB() const {
if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
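As a hedged illustration of what isAddrMode5FP16() accepts (operands invented for the example), the new addressing mode only allows even immediate offsets in [-510, 510]:

    vldr.16 s0, [r0, #510]    @ accepted: even and within [-510, 510]
    vldr.16 s0, [r0, #-510]   @ accepted: negative offsets are handled via the add/sub flag
    vldr.16 s0, [r0, #3]      @ rejected: not a multiple of 2
    vldr.16 s0, [r0, #512]    @ rejected: outside the representable range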
@@ -2145,6 +2159,28 @@ public:
Inst.addOperand(MCOperand::createImm(Val));
}
+ void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm()) {
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ Inst.addOperand(MCOperand::createImm(0));
+ return;
+ }
+
+ // The lower bit is always zero and as such is not encoded.
+ int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
+ ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
+ // Special case for #-0
+ if (Val == INT32_MIN) Val = 0;
+ if (Val < 0) Val = -Val;
+ Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
+ Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
+
void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If we have an immediate that's not a constant, treat it as a label
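A sketch of how addAddrMode5FP16Operands() above handles its two cases (again with invented operands): a label reference is kept as an expression and resolved later through one of the new fixup kinds with their 512-byte range, while a constant offset is halved and packed with the add/sub flag by ARM_AM::getAM5FP16Opc:

    vldr.16 s0, .Lhalf        @ non-constant immediate: expression operand plus a PC-relative fixup
    vldr.16 s1, [r2, #-6]     @ constant offset: encoded as imm8 = 3 with the subtract flag set
    vldr.16 s2, [r3, #-0]     @ "#-0": stored as INT32_MIN, emitted as offset 0 with the subtract flag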
@@ -4973,7 +5009,8 @@ ARMAsmParser::parseFPImm(OperandVector &Operands) {
// vmov.i{8|16|32|64} <dreg|qreg>, #imm
ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
bool isVmovf = TyOp.isToken() &&
- (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64");
+ (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
+ TyOp.getToken() == ".f16");
ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
Mnemonic.getToken() == "fconsts");
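With the parseFPImm change above, a floating-point immediate is now also accepted when the type specifier is ".f16"; for example (constant and registers chosen arbitrarily):

    vmov.f16 s0, #1.0         @ now parsed as an FP immediate, like the existing .f32/.f64 forms
    vmov.f32 s1, #0.5         @ previous behaviour, unchanged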
@@ -5265,7 +5302,7 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
- Mnemonic.startswith("vsel"))
+ Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx")
return Mnemonic;
// First, split out any predication code. Ignore mnemonics we know aren't
@@ -5369,7 +5406,8 @@ void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
- (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
+ (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
+ Mnemonic == "vmovx" || Mnemonic == "vins") {
// These mnemonics are never predicable
CanAcceptPredicationCode = false;
} else if (!isThumb()) {
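Because the last two hunks mark VMOVX and VINS as never predicable (and keep splitMnemonic from peeling a suffix off them), the parser treats them like the other always-executed mnemonics in this list; for instance (hypothetical operands):

    vmovx.f16   s0, s1        @ accepted
    vmovxeq.f16 s0, s1        @ rejected: these mnemonics never take a predication code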