 llvm/lib/Target/ARM/ARMISelLowering.cpp                   |  46
 llvm/lib/Target/ARM/ARMISelLowering.h                     |   2
 llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll | 552
 llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll      | 368
 4 files changed, 953 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index bfe137b..5490c3c 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -156,6 +156,17 @@ static const MCPhysReg GPRArgRegs[] = {
ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
+static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg,
+ SelectionDAG &DAG, const SDLoc &DL) {
+ assert(Arg.ArgVT.isScalarInteger());
+ assert(Arg.ArgVT.bitsLT(MVT::i32));
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, Arg.ArgVT, Value);
+ SDValue Ext =
+ DAG.getNode(Arg.Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
+ MVT::i32, Trunc);
+ return Ext;
+}
+
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
if (VT != PromotedLdStVT) {
setOperationAction(ISD::LOAD, VT, Promote);
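The new handleCMSEValue helper is the core of the hardening: it truncates the incoming value to the declared argument type, then re-extends it to i32 according to the signext/zeroext attribute. Its effect can be modelled in plain C; the sketch below (hypothetical function name) assumes a signext i16 value. For byte/halfword widths the ARM backend selects this pair as a single sxth/uxth or sxtb/uxtb; for odd widths such as i5 it becomes sbfx or a masking and, as the tests below check.

    /* Plain-C model of handleCMSEValue for a signext i16 (sketch only). */
    #include <stdint.h>

    int32_t harden_sext_i16(int32_t untrusted) {
      int16_t t = (int16_t)untrusted;  /* ISD::TRUNCATE to Arg.ArgVT    */
      return (int32_t)t;               /* ISD::SIGN_EXTEND back to i32, */
    }                                  /* selected as: sxth r0, r0      */
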
@@ -2193,7 +2204,7 @@ SDValue ARMTargetLowering::LowerCallResult(
SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
- SDValue ThisVal) const {
+ SDValue ThisVal, bool isCmseNSCall) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
@@ -2271,6 +2282,15 @@ SDValue ARMTargetLowering::LowerCallResult(
(VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);
+ // On CMSE Non-secure Calls, call results (returned values) whose bitwidth
+ // is less than 32 bits must be sign- or zero-extended after the call for
+ // security reasons. Although the ABI mandates an extension done by the
+ // callee, the latter cannot be trusted to follow the rules of the ABI.
+ const ISD::InputArg &Arg = Ins[VA.getValNo()];
+ if (isCmseNSCall && Arg.ArgVT.isScalarInteger() &&
+ VA.getLocVT().isScalarInteger() && Arg.ArgVT.bitsLT(MVT::i32))
+ Val = handleCMSEValue(Val, Arg, DAG, dl);
+
InVals.push_back(Val);
}
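To see the caller-side scenario this hunk addresses: a secure-state function makes a non-secure call that returns a narrow integer, and must not trust the non-secure callee to have performed the AAPCS-mandated extension. A minimal C sketch, with hypothetical names and built with -mcmse (the access_i16 test below is the IR equivalent):

    #include <stdint.h>

    /* Non-secure function type, per ACLE. */
    typedef int16_t __attribute__((cmse_nonsecure_call)) ns_get_idx_t(void);

    extern int32_t arr[256];

    int32_t read_arr(ns_get_idx_t *get_idx) {
      /* The callee should return a sign-extended value in r0, but a
         malicious one may leave garbage in the upper bits; the compiler
         now re-extends (sxth) after the blxns before indexing. */
      int16_t idx = get_idx();
      return arr[idx];
    }

Without the re-extension, the upper bits of r0 would flow into the array index under non-secure control, letting the access land outside arr.
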
@@ -2882,7 +2902,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// return.
return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
InVals, isThisReturn,
- isThisReturn ? OutVals[0] : SDValue());
+ isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall);
}
/// HandleByVal - Every parameter *after* a byval parameter is passed
@@ -4485,8 +4505,6 @@ SDValue ARMTargetLowering::LowerFormalArguments(
*DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
- SmallVector<SDValue, 16> ArgValues;
- SDValue ArgValue;
Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
@@ -4541,6 +4559,7 @@ SDValue ARMTargetLowering::LowerFormalArguments(
// Arguments stored in registers.
if (VA.isRegLoc()) {
EVT RegVT = VA.getLocVT();
+ SDValue ArgValue;
if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
// f64 and vector types are split up into multiple registers or
@@ -4604,16 +4623,6 @@ SDValue ARMTargetLowering::LowerFormalArguments(
case CCValAssign::BCvt:
ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
break;
- case CCValAssign::SExt:
- ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
- break;
- case CCValAssign::ZExt:
- ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
- break;
}
// f16 arguments have their size extended to 4 bytes and passed as if they
@@ -4623,6 +4632,15 @@ SDValue ARMTargetLowering::LowerFormalArguments(
(VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);
+ // On CMSE Entry Functions, formal integer arguments whose bitwidth is
+ // less than 32 bits must be sign- or zero-extended in the callee for
+ // security reasons. Although the ABI mandates an extension done by the
+ // caller, the latter cannot be trusted to follow the rules of the ABI.
+ const ISD::InputArg &Arg = Ins[VA.getValNo()];
+ if (AFI->isCmseNSEntryFunction() && Arg.ArgVT.isScalarInteger() &&
+ RegVT.isScalarInteger() && Arg.ArgVT.bitsLT(MVT::i32))
+ ArgValue = handleCMSEValue(ArgValue, Arg, DAG, dl);
+
InVals.push_back(ArgValue);
} else { // VA.isRegLoc()
// Only arguments passed on the stack should make it here.
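The mirror-image scenario on the callee side: a CMSE entry function receives a narrow argument from non-secure state and cannot assume the caller extended it. A C sketch of what the LowerFormalArguments change hardens, again with hypothetical names and built with -mcmse (the cmse-harden-entry-arguments.ll tests below cover the same pattern):

    #include <stdint.h>

    extern int32_t arr[256];

    /* Secure entry point callable from non-secure state. */
    __attribute__((cmse_nonsecure_entry))
    int32_t read_arr(int16_t idx) {
      /* The non-secure caller should pass idx sign-extended in r0 per
         the AAPCS, but cannot be trusted to; the prologue now re-extends
         it (sxth) before it is used as an index. */
      return arr[idx];
    }

This is also consistent with dropping the AssertSext/AssertZext lowering above: those nodes assert that the incoming bits are already correctly extended, which is exactly the assumption being removed and would allow the freshly inserted extends to be folded away.
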
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 62a52bd..a255e9b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -894,7 +894,7 @@ class VectorType;
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
- SDValue ThisVal) const;
+ SDValue ThisVal, bool isCmseNSCall) const;
bool supportSplitCSR(MachineFunction *MF) const override {
return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
diff --git a/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll b/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll
new file mode 100644
index 0000000..58eef44
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll
@@ -0,0 +1,552 @@
+; RUN: llc %s -mtriple=thumbv8m.main -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-LE
+; RUN: llc %s -mtriple=thumbebv8m.main -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-BE
+; RUN: llc %s -mtriple=thumbv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-LE
+; RUN: llc %s -mtriple=thumbebv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-BE
+
+@get_idx = hidden local_unnamed_addr global ptr null, align 4
+@arr = hidden local_unnamed_addr global [256 x i32] zeroinitializer, align 4
+
+define i32 @access_i16() {
+; V8M-COMMON-LABEL: access_i16:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V8M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V8M-COMMON-NEXT: ldr r0, [r0]
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: sxth r0, r0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_i16:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V81M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V81M-COMMON-NEXT: ldr r0, [r0]
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: sxth r0, r0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %0 = load ptr, ptr @get_idx, align 4
+ %call = tail call signext i16 %0() "cmse_nonsecure_call"
+ %idxprom = sext i16 %call to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
+ ret i32 %1
+}
+
+define i32 @access_u16() {
+; V8M-COMMON-LABEL: access_u16:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V8M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V8M-COMMON-NEXT: ldr r0, [r0]
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: uxth r0, r0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_u16:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V81M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V81M-COMMON-NEXT: ldr r0, [r0]
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: uxth r0, r0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %0 = load ptr, ptr @get_idx, align 4
+ %call = tail call zeroext i16 %0() "cmse_nonsecure_call"
+ %idxprom = zext i16 %call to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
+ ret i32 %1
+}
+
+define i32 @access_i8() {
+; V8M-COMMON-LABEL: access_i8:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V8M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V8M-COMMON-NEXT: ldr r0, [r0]
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: sxtb r0, r0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_i8:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V81M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V81M-COMMON-NEXT: ldr r0, [r0]
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: sxtb r0, r0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %0 = load ptr, ptr @get_idx, align 4
+ %call = tail call signext i8 %0() "cmse_nonsecure_call"
+ %idxprom = sext i8 %call to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
+ ret i32 %1
+}
+
+define i32 @access_u8() {
+; V8M-COMMON-LABEL: access_u8:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V8M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V8M-COMMON-NEXT: ldr r0, [r0]
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: uxtb r0, r0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_u8:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V81M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V81M-COMMON-NEXT: ldr r0, [r0]
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: uxtb r0, r0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %0 = load ptr, ptr @get_idx, align 4
+ %call = tail call zeroext i8 %0() "cmse_nonsecure_call"
+ %idxprom = zext i8 %call to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
+ ret i32 %1
+}
+
+define i32 @access_i1() {
+; V8M-COMMON-LABEL: access_i1:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V8M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V8M-COMMON-NEXT: ldr r0, [r0]
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: and r0, r0, #1
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_i1:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V81M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V81M-COMMON-NEXT: ldr r0, [r0]
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: and r0, r0, #1
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %0 = load ptr, ptr @get_idx, align 4
+ %call = tail call zeroext i1 %0() "cmse_nonsecure_call"
+ %idxprom = zext i1 %call to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
+ ret i32 %1
+}
+
+define i32 @access_i5() {
+; V8M-COMMON-LABEL: access_i5:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V8M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V8M-COMMON-NEXT: ldr r0, [r0]
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: sbfx r0, r0, #0, #5
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_i5:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V81M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V81M-COMMON-NEXT: ldr r0, [r0]
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: sbfx r0, r0, #0, #5
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %0 = load ptr, ptr @get_idx, align 4
+ %call = tail call signext i5 %0() "cmse_nonsecure_call"
+ %idxprom = sext i5 %call to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
+ ret i32 %1
+}
+
+define i32 @access_u5() {
+; V8M-COMMON-LABEL: access_u5:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V8M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V8M-COMMON-NEXT: ldr r0, [r0]
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: and r0, r0, #31
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_u5:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: movw r0, :lower16:get_idx
+; V81M-COMMON-NEXT: movt r0, :upper16:get_idx
+; V81M-COMMON-NEXT: ldr r0, [r0]
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: and r0, r0, #31
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %0 = load ptr, ptr @get_idx, align 4
+ %call = tail call zeroext i5 %0() "cmse_nonsecure_call"
+ %idxprom = zext i5 %call to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %1 = load i32, ptr %arrayidx, align 4
+ ret i32 %1
+}
+
+define i32 @access_i33(ptr %f) {
+; V8M-COMMON-LABEL: access_i33:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-LE-NEXT: and r0, r1, #1
+; V8M-BE-NEXT: and r0, r0, #1
+; V8M-COMMON-NEXT: rsb.w r0, r0, #0
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_i33:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-LE-NEXT: and r0, r1, #1
+; V81M-BE-NEXT: and r0, r0, #1
+; V81M-COMMON-NEXT: rsb.w r0, r0, #0
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %call = tail call i33 %f() "cmse_nonsecure_call"
+ %shr = ashr i33 %call, 32
+ %conv = trunc nsw i33 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @access_u33(ptr %f) {
+; V8M-COMMON-LABEL: access_u33:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: push {r7, lr}
+; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-COMMON-NEXT: bic r0, r0, #1
+; V8M-COMMON-NEXT: sub sp, #136
+; V8M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V8M-COMMON-NEXT: mov r1, r0
+; V8M-COMMON-NEXT: mov r2, r0
+; V8M-COMMON-NEXT: mov r3, r0
+; V8M-COMMON-NEXT: mov r4, r0
+; V8M-COMMON-NEXT: mov r5, r0
+; V8M-COMMON-NEXT: mov r6, r0
+; V8M-COMMON-NEXT: mov r7, r0
+; V8M-COMMON-NEXT: mov r8, r0
+; V8M-COMMON-NEXT: mov r9, r0
+; V8M-COMMON-NEXT: mov r10, r0
+; V8M-COMMON-NEXT: mov r11, r0
+; V8M-COMMON-NEXT: mov r12, r0
+; V8M-COMMON-NEXT: msr apsr_nzcvq, r0
+; V8M-COMMON-NEXT: blxns r0
+; V8M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V8M-COMMON-NEXT: add sp, #136
+; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-LE-NEXT: and r0, r1, #1
+; V8M-BE-NEXT: and r0, r0, #1
+; V8M-COMMON-NEXT: pop {r7, pc}
+;
+; V81M-COMMON-LABEL: access_u33:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: push {r7, lr}
+; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-COMMON-NEXT: bic r0, r0, #1
+; V81M-COMMON-NEXT: sub sp, #136
+; V81M-COMMON-NEXT: vlstm sp, {d0 - d15}
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-COMMON-NEXT: blxns r0
+; V81M-COMMON-NEXT: vlldm sp, {d0 - d15}
+; V81M-COMMON-NEXT: add sp, #136
+; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-LE-NEXT: and r0, r1, #1
+; V81M-BE-NEXT: and r0, r0, #1
+; V81M-COMMON-NEXT: pop {r7, pc}
+entry:
+ %call = tail call i33 %f() "cmse_nonsecure_call"
+ %shr = lshr i33 %call, 32
+ %conv = trunc nuw nsw i33 %shr to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll b/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll
new file mode 100644
index 0000000..c66ab00
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll
@@ -0,0 +1,368 @@
+; RUN: llc %s -mtriple=thumbv8m.main -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-LE
+; RUN: llc %s -mtriple=thumbebv8m.main -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-BE
+; RUN: llc %s -mtriple=thumbv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-LE
+; RUN: llc %s -mtriple=thumbebv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-BE
+
+@arr = hidden local_unnamed_addr global [256 x i32] zeroinitializer, align 4
+
+define i32 @access_i16(i16 signext %idx) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_i16:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: sxth r0, r0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_i16:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: sxth r0, r0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %idxprom = sext i16 %idx to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ ret i32 %0
+}
+
+define i32 @access_u16(i16 zeroext %idx) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_u16:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: uxth r0, r0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_u16:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: uxth r0, r0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %idxprom = zext i16 %idx to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ ret i32 %0
+}
+
+define i32 @access_i8(i8 signext %idx) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_i8:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: sxtb r0, r0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_i8:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: sxtb r0, r0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %idxprom = sext i8 %idx to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ ret i32 %0
+}
+
+define i32 @access_u8(i8 zeroext %idx) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_u8:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: uxtb r0, r0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_u8:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: uxtb r0, r0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %idxprom = zext i8 %idx to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ ret i32 %0
+}
+
+define i32 @access_i1(i1 signext %idx) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_i1:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: and r0, r0, #1
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: rsbs r0, r0, #0
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: and r0, r0, #1
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_i1:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: and r0, r0, #1
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: rsbs r0, r0, #0
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: and r0, r0, #1
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %idxprom = zext i1 %idx to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ ret i32 %0
+}
+
+define i32 @access_i5(i5 signext %idx) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_i5:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: sbfx r0, r0, #0, #5
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_i5:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: sbfx r0, r0, #0, #5
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %idxprom = sext i5 %idx to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ ret i32 %0
+}
+
+define i32 @access_u5(i5 zeroext %idx) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_u5:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: movw r1, :lower16:arr
+; V8M-COMMON-NEXT: and r0, r0, #31
+; V8M-COMMON-NEXT: movt r1, :upper16:arr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_u5:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: movw r1, :lower16:arr
+; V81M-COMMON-NEXT: and r0, r0, #31
+; V81M-COMMON-NEXT: movt r1, :upper16:arr
+; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2]
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %idxprom = zext i5 %idx to i32
+ %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ ret i32 %0
+}
+
+define i32 @access_i33(i33 %arg) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_i33:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-LE-NEXT: and r0, r1, #1
+; V8M-BE-NEXT: and r0, r0, #1
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: rsbs r0, r0, #0
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_i33:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-LE-NEXT: and r0, r1, #1
+; V81M-BE-NEXT: and r0, r0, #1
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: rsbs r0, r0, #0
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %shr = ashr i33 %arg, 32
+ %conv = trunc nsw i33 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @access_u33(i33 %arg) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_u33:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-LE-NEXT: and r0, r1, #1
+; V8M-BE-NEXT: and r0, r0, #1
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_u33:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-LE-NEXT: and r0, r1, #1
+; V81M-BE-NEXT: and r0, r0, #1
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %shr = lshr i33 %arg, 32
+ %conv = trunc nuw nsw i33 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @access_i65(ptr byval(i65) %0) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_i65:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: sub sp, #16
+; V8M-COMMON-NEXT: stm.w sp, {r0, r1, r2, r3}
+; V8M-LE-NEXT: ldrb.w r0, [sp, #8]
+; V8M-LE-NEXT: and r0, r0, #1
+; V8M-LE-NEXT: rsbs r0, r0, #0
+; V8M-BE-NEXT: movs r1, #0
+; V8M-BE-NEXT: sub.w r0, r1, r0, lsr #24
+; V8M-COMMON-NEXT: add sp, #16
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_i65:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: sub sp, #16
+; V81M-COMMON-NEXT: add sp, #4
+; V81M-COMMON-NEXT: stm.w sp, {r0, r1, r2, r3}
+; V81M-LE-NEXT: ldrb.w r0, [sp, #8]
+; V81M-LE-NEXT: and r0, r0, #1
+; V81M-LE-NEXT: rsbs r0, r0, #0
+; V81M-BE-NEXT: movs r1, #0
+; V81M-BE-NEXT: sub.w r0, r1, r0, lsr #24
+; V81M-COMMON-NEXT: sub sp, #4
+; V81M-COMMON-NEXT: add sp, #16
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %arg = load i65, ptr %0, align 8
+ %shr = ashr i65 %arg, 64
+ %conv = trunc nsw i65 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @access_u65(ptr byval(i65) %0) "cmse_nonsecure_entry" {
+; V8M-COMMON-LABEL: access_u65:
+; V8M-COMMON: @ %bb.0: @ %entry
+; V8M-COMMON-NEXT: sub sp, #16
+; V8M-COMMON-NEXT: stm.w sp, {r0, r1, r2, r3}
+; V8M-LE-NEXT: ldrb.w r0, [sp, #8]
+; V8M-BE-NEXT: lsrs r0, r0, #24
+; V8M-COMMON-NEXT: add sp, #16
+; V8M-COMMON-NEXT: mov r1, lr
+; V8M-COMMON-NEXT: mov r2, lr
+; V8M-COMMON-NEXT: mov r3, lr
+; V8M-COMMON-NEXT: mov r12, lr
+; V8M-COMMON-NEXT: msr apsr_nzcvq, lr
+; V8M-COMMON-NEXT: bxns lr
+;
+; V81M-COMMON-LABEL: access_u65:
+; V81M-COMMON: @ %bb.0: @ %entry
+; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]!
+; V81M-COMMON-NEXT: sub sp, #16
+; V81M-COMMON-NEXT: add sp, #4
+; V81M-COMMON-NEXT: stm.w sp, {r0, r1, r2, r3}
+; V81M-LE-NEXT: ldrb.w r0, [sp, #8]
+; V81M-BE-NEXT: lsrs r0, r0, #24
+; V81M-COMMON-NEXT: sub sp, #4
+; V81M-COMMON-NEXT: add sp, #16
+; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr}
+; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4
+; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr}
+; V81M-COMMON-NEXT: bxns lr
+entry:
+ %arg = load i65, ptr %0, align 8
+ %shr = lshr i65 %arg, 64
+ %conv = trunc nuw nsw i65 %shr to i32
+ ret i32 %conv
+}