Diffstat (limited to 'llvm/test/CodeGen/ARM')
-rw-r--r-- llvm/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll |    6
-rw-r--r-- llvm/test/CodeGen/ARM/bad-constraint.ll          |    6
-rw-r--r-- llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir  |    4
-rw-r--r-- llvm/test/CodeGen/ARM/codesize-ifcvt.mir         |   12
-rw-r--r-- llvm/test/CodeGen/ARM/constant-island-movwt.mir  |    4
-rw-r--r-- llvm/test/CodeGen/ARM/constant-islands-cfg.mir   |    4
-rw-r--r-- llvm/test/CodeGen/ARM/constant-islands-split-IT.mir | 4
-rw-r--r-- llvm/test/CodeGen/ARM/execute-only-save-cpsr.mir |   16
-rw-r--r-- llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir      |    4
-rw-r--r-- llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir      |    4
-rw-r--r-- llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll |   14
-rw-r--r-- llvm/test/CodeGen/ARM/inlineasmbr-if-cvt.mir     |    4
-rw-r--r-- llvm/test/CodeGen/ARM/invalidated-save-point.ll  |    4
-rw-r--r-- llvm/test/CodeGen/ARM/jump-table-dbg-value.mir   |    4
-rw-r--r-- llvm/test/CodeGen/ARM/legalize-bitcast.ll        |    4
-rw-r--r-- llvm/test/CodeGen/ARM/llrint-conv.ll             |   29
-rw-r--r-- llvm/test/CodeGen/ARM/lrint-conv.ll              |   18
-rw-r--r-- llvm/test/CodeGen/ARM/scmp.ll                    |   48
-rw-r--r-- llvm/test/CodeGen/ARM/stack_frame_offset.mir     |   12
-rw-r--r-- llvm/test/CodeGen/ARM/ucmp.ll                    |   36
-rw-r--r-- llvm/test/CodeGen/ARM/vector-llrint.ll           | 2394
-rw-r--r-- llvm/test/CodeGen/ARM/vector-lrint.ll            | 3265
-rw-r--r-- llvm/test/CodeGen/ARM/vtrn.ll                    |  180
-rw-r--r-- llvm/test/CodeGen/ARM/vuzp.ll                    |   56
-rw-r--r-- llvm/test/CodeGen/ARM/vzip.ll                    |   19
25 files changed, 6001 insertions(+), 150 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll b/llvm/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
index 344bb15..8f798fa 100644
--- a/llvm/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
+++ b/llvm/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8 | FileCheck -check-prefix=CHECK-V8 %s
-; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it | FileCheck -check-prefix=CHECK-RESTRICT-IT %s
+; RUN: llc -keep-loops="false" < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc -keep-loops="false" < %s -mtriple=thumbv8 | FileCheck -check-prefix=CHECK-V8 %s
+; RUN: llc -keep-loops="false" < %s -mtriple=thumbv7 -arm-restrict-it | FileCheck -check-prefix=CHECK-RESTRICT-IT %s
define i32 @t1(i32 %a, i32 %b, ptr %retaddr) {
; CHECK-LABEL: t1:
diff --git a/llvm/test/CodeGen/ARM/bad-constraint.ll b/llvm/test/CodeGen/ARM/bad-constraint.ll
index 9b8fcd5..7d80f0c 100644
--- a/llvm/test/CodeGen/ARM/bad-constraint.ll
+++ b/llvm/test/CodeGen/ARM/bad-constraint.ll
@@ -1,6 +1,7 @@
; RUN: not llc -filetype=obj %s -o /dev/null 2>&1 | FileCheck %s
; CHECK: error: couldn't allocate input reg for constraint '{d2}'
; CHECK-NEXT: error: couldn't allocate input reg for constraint '{s2}'
+; CHECK-NEXT: error: couldn't allocate input reg for constraint '{d3}'
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv8a-unknown-linux-gnueabihf"
@@ -23,3 +24,8 @@ entry:
ret void
}
+define void @_Z1dv() local_unnamed_addr {
+entry:
+ tail call void asm sideeffect "", "{d3}"(<16 x i8> splat (i8 -1))
+ ret void
+}
diff --git a/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir b/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
index 29429fd..9bc335c 100644
--- a/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
+++ b/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
@@ -55,8 +55,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
diff --git a/llvm/test/CodeGen/ARM/codesize-ifcvt.mir b/llvm/test/CodeGen/ARM/codesize-ifcvt.mir
index 3acbcf1..53a2a4a 100644
--- a/llvm/test/CodeGen/ARM/codesize-ifcvt.mir
+++ b/llvm/test/CodeGen/ARM/codesize-ifcvt.mir
@@ -142,8 +142,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
@@ -304,8 +304,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
@@ -472,8 +472,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
diff --git a/llvm/test/CodeGen/ARM/constant-island-movwt.mir b/llvm/test/CodeGen/ARM/constant-island-movwt.mir
index 7b3e59e..215d73f6 100644
--- a/llvm/test/CodeGen/ARM/constant-island-movwt.mir
+++ b/llvm/test/CodeGen/ARM/constant-island-movwt.mir
@@ -341,8 +341,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
- { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
diff --git a/llvm/test/CodeGen/ARM/constant-islands-cfg.mir b/llvm/test/CodeGen/ARM/constant-islands-cfg.mir
index d85e7bf..c4d6a47 100644
--- a/llvm/test/CodeGen/ARM/constant-islands-cfg.mir
+++ b/llvm/test/CodeGen/ARM/constant-islands-cfg.mir
@@ -32,8 +32,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
body: |
diff --git a/llvm/test/CodeGen/ARM/constant-islands-split-IT.mir b/llvm/test/CodeGen/ARM/constant-islands-split-IT.mir
index 236cd34..60f4ec6 100644
--- a/llvm/test/CodeGen/ARM/constant-islands-split-IT.mir
+++ b/llvm/test/CodeGen/ARM/constant-islands-split-IT.mir
@@ -47,8 +47,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 28
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
callSites: []
constants:
diff --git a/llvm/test/CodeGen/ARM/execute-only-save-cpsr.mir b/llvm/test/CodeGen/ARM/execute-only-save-cpsr.mir
index cdd8ed49..6a6d474 100644
--- a/llvm/test/CodeGen/ARM/execute-only-save-cpsr.mir
+++ b/llvm/test/CodeGen/ARM/execute-only-save-cpsr.mir
@@ -118,8 +118,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 2052
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: var, type: default, offset: 0, size: 4, alignment: 4,
@@ -204,8 +204,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 2052
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: var, type: default, offset: 0, size: 4, alignment: 4,
@@ -296,8 +296,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 2052
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: var, type: default, offset: 0, size: 4, alignment: 4,
@@ -388,8 +388,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 2052
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: var, type: default, offset: 0, size: 4, alignment: 4,
diff --git a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
index bd343eb..46f028b 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
@@ -61,8 +61,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
- { id: 0, name: res, type: default, offset: -2, size: 2, alignment: 2,
diff --git a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
index 1f8e6b0..5a03fcd 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
@@ -62,8 +62,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
- { id: 0, name: res, type: default, offset: -2, size: 2, alignment: 2,
diff --git a/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll
new file mode 100644
index 0000000..0c01bb9
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll
@@ -0,0 +1,14 @@
+; RUN: llc %s -filetype=asm -o - | FileCheck %s
+
+; CHECK: vmov.i8 d3, #0xff
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8a-unknown-linux-gnueabihf"
+
+; Function Attrs: mustprogress noimplicitfloat nounwind
+define void @cvt_vec() local_unnamed_addr {
+entry:
+ tail call void asm sideeffect "", "{d3}"(<8 x i8> splat (i8 -1))
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/ARM/inlineasmbr-if-cvt.mir b/llvm/test/CodeGen/ARM/inlineasmbr-if-cvt.mir
index 2d53074..9a7ac0a 100644
--- a/llvm/test/CodeGen/ARM/inlineasmbr-if-cvt.mir
+++ b/llvm/test/CodeGen/ARM/inlineasmbr-if-cvt.mir
@@ -69,8 +69,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/ARM/invalidated-save-point.ll b/llvm/test/CodeGen/ARM/invalidated-save-point.ll
index bb60230..0ff153b 100644
--- a/llvm/test/CodeGen/ARM/invalidated-save-point.ll
+++ b/llvm/test/CodeGen/ARM/invalidated-save-point.ll
@@ -4,8 +4,8 @@
; this point. Notably, if it isn't it will be invalid and reference a
; deleted block (%bb.-1.if.end)
-; CHECK: savePoint: ''
-; CHECK: restorePoint: ''
+; CHECK-NOT: savePoint:
+; CHECK-NOT: restorePoint:
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv7"
diff --git a/llvm/test/CodeGen/ARM/jump-table-dbg-value.mir b/llvm/test/CodeGen/ARM/jump-table-dbg-value.mir
index ec475e1..4e7628a 100644
--- a/llvm/test/CodeGen/ARM/jump-table-dbg-value.mir
+++ b/llvm/test/CodeGen/ARM/jump-table-dbg-value.mir
@@ -98,8 +98,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/ARM/legalize-bitcast.ll b/llvm/test/CodeGen/ARM/legalize-bitcast.ll
index 5b989a0..92b77f5 100644
--- a/llvm/test/CodeGen/ARM/legalize-bitcast.ll
+++ b/llvm/test/CodeGen/ARM/legalize-bitcast.ll
@@ -7,7 +7,7 @@ define i32 @vec_to_int() {
; CHECK-LABEL: vec_to_int:
; CHECK: @ %bb.0: @ %bb.0
; CHECK-NEXT: push {r4}
-; CHECK-NEXT: sub sp, sp, #28
+; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: movw r0, :lower16:vec6_p
; CHECK-NEXT: movt r0, :upper16:vec6_p
; CHECK-NEXT: vld1.8 {d16}, [r0]!
@@ -25,7 +25,7 @@ define i32 @vec_to_int() {
; CHECK-NEXT: vrev32.16 q8, q8
; CHECK-NEXT: vmov.f64 d16, d17
; CHECK-NEXT: vmov.32 r0, d16[0]
-; CHECK-NEXT: add sp, sp, #28
+; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: pop {r4}
; CHECK-NEXT: bx lr
bb.0:
diff --git a/llvm/test/CodeGen/ARM/llrint-conv.ll b/llvm/test/CodeGen/ARM/llrint-conv.ll
index 017955b..749ee00 100644
--- a/llvm/test/CodeGen/ARM/llrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/llrint-conv.ll
@@ -1,13 +1,23 @@
; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; SOFTFP-LABEL: testmsxh_builtin:
+; SOFTFP: bl llrintf
+; HARDFP-LABEL: testmsxh_builtin:
+; HARDFP: bl llrintf
+define i64 @testmsxh_builtin(half %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.i64.f16(half %x)
+ ret i64 %0
+}
+
; SOFTFP-LABEL: testmsxs_builtin:
; SOFTFP: bl llrintf
; HARDFP-LABEL: testmsxs_builtin:
; HARDFP: bl llrintf
define i64 @testmsxs_builtin(float %x) {
entry:
- %0 = tail call i64 @llvm.llrint.f32(float %x)
+ %0 = tail call i64 @llvm.llrint.i64.f32(float %x)
ret i64 %0
}
@@ -17,9 +27,20 @@ entry:
; HARDFP: bl llrint
define i64 @testmsxd_builtin(double %x) {
entry:
- %0 = tail call i64 @llvm.llrint.f64(double %x)
+ %0 = tail call i64 @llvm.llrint.i64.f64(double %x)
+ ret i64 %0
+}
+
+; FIXME(#44744): incorrect libcall
+; SOFTFP-LABEL: testmsxq_builtin:
+; SOFTFP: bl llrintl
+; HARDFP-LABEL: testmsxq_builtin:
+; HARDFP: bl llrintl
+define i64 @testmsxq_builtin(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x)
ret i64 %0
}
-declare i64 @llvm.llrint.f32(float) nounwind readnone
-declare i64 @llvm.llrint.f64(double) nounwind readnone
+declare i64 @llvm.llrint.i64.f32(float) nounwind readnone
+declare i64 @llvm.llrint.i64.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/ARM/lrint-conv.ll b/llvm/test/CodeGen/ARM/lrint-conv.ll
index 192da56..9aa9511 100644
--- a/llvm/test/CodeGen/ARM/lrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/lrint-conv.ll
@@ -1,6 +1,13 @@
; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; FIXME: crash
+; define i32 @testmswh_builtin(half %x) {
+; entry:
+; %0 = tail call i32 @llvm.lrint.i32.f16(half %x)
+; ret i32 %0
+; }
+
; SOFTFP-LABEL: testmsws_builtin:
; SOFTFP: bl lrintf
; HARDFP-LABEL: testmsws_builtin:
@@ -21,5 +28,16 @@ entry:
ret i32 %0
}
+; FIXME(#44744): incorrect libcall
+; SOFTFP-LABEL: testmswq_builtin:
+; SOFTFP: bl lrintl
+; HARDFP-LABEL: testmswq_builtin:
+; HARDFP: bl lrintl
+define i32 @testmswq_builtin(fp128 %x) {
+entry:
+ %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
+ ret i32 %0
+}
+
declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/ARM/scmp.ll b/llvm/test/CodeGen/ARM/scmp.ll
index 6e493c9..9189aee 100644
--- a/llvm/test/CodeGen/ARM/scmp.ll
+++ b/llvm/test/CodeGen/ARM/scmp.ll
@@ -4,12 +4,9 @@
define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind {
; CHECK-LABEL: scmp_8_8:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i8 %x, i8 %y)
ret i8 %1
@@ -18,12 +15,9 @@ define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind {
define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind {
; CHECK-LABEL: scmp_8_16:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i16 %x, i16 %y)
ret i8 %1
@@ -32,12 +26,9 @@ define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind {
define i8 @scmp_8_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: scmp_8_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i32 %x, i32 %y)
ret i8 %1
@@ -92,17 +83,26 @@ define i8 @scmp_8_128(i128 %x, i128 %y) nounwind {
define i32 @scmp_32_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: scmp_32_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i32 @llvm.scmp(i32 %x, i32 %y)
ret i32 %1
}
+define i32 @scmp_neg(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: scmp_neg:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: adds r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
+; CHECK-NEXT: bx lr
+ %yy = sub nsw i32 0, %y
+ %1 = call i32 @llvm.scmp(i32 %x, i32 %yy)
+ ret i32 %1
+}
+
define i32 @scmp_32_64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: scmp_32_64:
; CHECK: @ %bb.0:
diff --git a/llvm/test/CodeGen/ARM/stack_frame_offset.mir b/llvm/test/CodeGen/ARM/stack_frame_offset.mir
index e387e079..423928d 100644
--- a/llvm/test/CodeGen/ARM/stack_frame_offset.mir
+++ b/llvm/test/CodeGen/ARM/stack_frame_offset.mir
@@ -51,8 +51,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 4
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
stack:
- { id: 0, name: a, type: default, offset: 0, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
@@ -103,8 +103,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 4
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
stack:
- { id: 0, name: a, type: default, offset: 0, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
@@ -155,8 +155,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 4
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
stack:
- { id: 0, name: a, type: default, offset: 0, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
diff --git a/llvm/test/CodeGen/ARM/ucmp.ll b/llvm/test/CodeGen/ARM/ucmp.ll
index ad4af53..bb02014 100644
--- a/llvm/test/CodeGen/ARM/ucmp.ll
+++ b/llvm/test/CodeGen/ARM/ucmp.ll
@@ -4,12 +4,9 @@
define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
; CHECK-LABEL: ucmp_8_8:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
ret i8 %1
@@ -18,12 +15,9 @@ define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
; CHECK-LABEL: ucmp_8_16:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
ret i8 %1
@@ -32,12 +26,9 @@ define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ucmp_8_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
ret i8 %1
@@ -92,12 +83,9 @@ define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind {
define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ucmp_32_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
diff --git a/llvm/test/CodeGen/ARM/vector-llrint.ll b/llvm/test/CodeGen/ARM/vector-llrint.ll
new file mode 100644
index 0000000..5f4e391
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/vector-llrint.ll
@@ -0,0 +1,2394 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; RUN: llc %s -o - -mtriple=armv7-unknown-none-eabihf | FileCheck %s --check-prefixes=LE
+; RUN: llc %s -o - -mtriple=armv7-unknown-none-eabihf -mattr=+neon | FileCheck %s --check-prefixes=LE
+; RUN: llc %s -o - -mtriple=armebv7-unknown-none-eabihf | FileCheck %s --check-prefixes=BE
+; RUN: llc %s -o - -mtriple=armebv7-unknown-none-eabihf -mattr=+neon | FileCheck %s --check-prefixes=BE
+
+define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
+; LE-LABEL: llrint_v1i64_v1f16:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r11, lr}
+; LE-NEXT: push {r11, lr}
+; LE-NEXT: vmov r0, s0
+; LE-NEXT: bl __aeabi_f2h
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d0[0], r0
+; LE-NEXT: vmov.32 d0[1], r1
+; LE-NEXT: pop {r11, pc}
+;
+; BE-LABEL: llrint_v1i64_v1f16:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r11, lr}
+; BE-NEXT: push {r11, lr}
+; BE-NEXT: vmov r0, s0
+; BE-NEXT: bl __aeabi_f2h
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d0, d16
+; BE-NEXT: pop {r11, pc}
+ %a = call <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>)
+
+define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
+; LE-LABEL: llrint_v1i64_v2f16:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r11, lr}
+; LE-NEXT: push {r4, r5, r11, lr}
+; LE-NEXT: .vsave {d8, d9}
+; LE-NEXT: vpush {d8, d9}
+; LE-NEXT: vmov r0, s1
+; LE-NEXT: vmov.f32 s16, s0
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: mov r4, r0
+; LE-NEXT: vmov r0, s16
+; LE-NEXT: mov r5, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: vmov.32 d9[0], r4
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: vmov.32 d9[1], r5
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vorr q0, q4, q4
+; LE-NEXT: vpop {d8, d9}
+; LE-NEXT: pop {r4, r5, r11, pc}
+;
+; BE-LABEL: llrint_v1i64_v2f16:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r11, lr}
+; BE-NEXT: push {r4, r5, r11, lr}
+; BE-NEXT: .vsave {d8}
+; BE-NEXT: vpush {d8}
+; BE-NEXT: vmov r0, s1
+; BE-NEXT: vmov.f32 s16, s0
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: mov r4, r0
+; BE-NEXT: vmov r0, s16
+; BE-NEXT: mov r5, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: vmov.32 d8[0], r4
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d8[1], r5
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d1, d8
+; BE-NEXT: vrev64.32 d0, d16
+; BE-NEXT: vpop {d8}
+; BE-NEXT: pop {r4, r5, r11, pc}
+ %a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>)
+
+define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
+; LE-LABEL: llrint_v4i64_v4f16:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r11, lr}
+; LE-NEXT: .vsave {d12, d13}
+; LE-NEXT: vpush {d12, d13}
+; LE-NEXT: .vsave {d8, d9, d10}
+; LE-NEXT: vpush {d8, d9, d10}
+; LE-NEXT: vmov r0, s1
+; LE-NEXT: vmov.f32 s16, s3
+; LE-NEXT: vmov.f32 s20, s2
+; LE-NEXT: vmov.f32 s18, s0
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: mov r5, r0
+; LE-NEXT: vmov r0, s18
+; LE-NEXT: mov r4, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: mov r7, r0
+; LE-NEXT: vmov r0, s16
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov s0, r7
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: vmov r0, s20
+; LE-NEXT: mov r7, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: vmov.32 d13[0], r5
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: vmov.32 d13[1], r4
+; LE-NEXT: vmov.32 d9[1], r6
+; LE-NEXT: vmov.32 d12[1], r7
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vorr q0, q6, q6
+; LE-NEXT: vorr q1, q4, q4
+; LE-NEXT: vpop {d8, d9, d10}
+; LE-NEXT: vpop {d12, d13}
+; LE-NEXT: pop {r4, r5, r6, r7, r11, pc}
+;
+; BE-LABEL: llrint_v4i64_v4f16:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r11, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r11, lr}
+; BE-NEXT: .vsave {d8, d9, d10}
+; BE-NEXT: vpush {d8, d9, d10}
+; BE-NEXT: vmov r0, s1
+; BE-NEXT: vmov.f32 s16, s3
+; BE-NEXT: vmov.f32 s18, s2
+; BE-NEXT: vmov.f32 s20, s0
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: mov r5, r0
+; BE-NEXT: vmov r0, s20
+; BE-NEXT: mov r4, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r7, r0
+; BE-NEXT: vmov r0, s16
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov s0, r7
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d8[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: vmov r0, s18
+; BE-NEXT: mov r7, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: vmov.32 d9[0], r5
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d9[1], r4
+; BE-NEXT: vmov.32 d8[1], r6
+; BE-NEXT: vmov.32 d10[1], r7
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d1, d9
+; BE-NEXT: vrev64.32 d3, d8
+; BE-NEXT: vrev64.32 d0, d10
+; BE-NEXT: vrev64.32 d2, d16
+; BE-NEXT: vpop {d8, d9, d10}
+; BE-NEXT: pop {r4, r5, r6, r7, r11, pc}
+ %a = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half>)
+
+define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
+; LE-LABEL: llrint_v8i64_v8f16:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: .pad #4
+; LE-NEXT: sub sp, sp, #4
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: .pad #8
+; LE-NEXT: sub sp, sp, #8
+; LE-NEXT: vmov r0, s1
+; LE-NEXT: vstr s6, [sp, #4] @ 4-byte Spill
+; LE-NEXT: vmov.f32 s16, s7
+; LE-NEXT: vmov.f32 s18, s5
+; LE-NEXT: vmov.f32 s20, s4
+; LE-NEXT: vmov.f32 s22, s3
+; LE-NEXT: vmov.f32 s24, s2
+; LE-NEXT: vmov.f32 s26, s0
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: mov r9, r0
+; LE-NEXT: vmov r0, s26
+; LE-NEXT: str r1, [sp] @ 4-byte Spill
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: mov r10, r0
+; LE-NEXT: vmov r0, s22
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: mov r5, r0
+; LE-NEXT: vmov r0, s24
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: mov r7, r0
+; LE-NEXT: vmov r0, s18
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: mov r6, r0
+; LE-NEXT: vmov r0, s20
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: mov r4, r0
+; LE-NEXT: vmov r0, s16
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov s0, r4
+; LE-NEXT: mov r11, r1
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov s0, r6
+; LE-NEXT: mov r8, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov s0, r7
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov s0, r5
+; LE-NEXT: mov r7, r1
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov s0, r10
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vldr s0, [sp, #4] @ 4-byte Reload
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: vmov r0, s0
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: vmov.32 d9[0], r9
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: ldr r0, [sp] @ 4-byte Reload
+; LE-NEXT: vmov.32 d15[1], r5
+; LE-NEXT: vmov.32 d9[1], r0
+; LE-NEXT: vmov.32 d13[1], r6
+; LE-NEXT: vmov.32 d11[1], r11
+; LE-NEXT: vmov.32 d8[1], r4
+; LE-NEXT: vmov.32 d14[1], r7
+; LE-NEXT: vorr q0, q4, q4
+; LE-NEXT: vmov.32 d12[1], r8
+; LE-NEXT: vorr q1, q7, q7
+; LE-NEXT: vmov.32 d10[1], r1
+; LE-NEXT: vorr q2, q6, q6
+; LE-NEXT: vorr q3, q5, q5
+; LE-NEXT: add sp, sp, #8
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: add sp, sp, #4
+; LE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-LABEL: llrint_v8i64_v8f16:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: .pad #4
+; BE-NEXT: sub sp, sp, #4
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14}
+; BE-NEXT: .pad #8
+; BE-NEXT: sub sp, sp, #8
+; BE-NEXT: vmov r0, s1
+; BE-NEXT: vmov.f32 s18, s7
+; BE-NEXT: vmov.f32 s16, s6
+; BE-NEXT: vmov.f32 s20, s5
+; BE-NEXT: vmov.f32 s22, s4
+; BE-NEXT: vmov.f32 s24, s3
+; BE-NEXT: vmov.f32 s26, s2
+; BE-NEXT: vmov.f32 s28, s0
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: mov r9, r0
+; BE-NEXT: vmov r0, s28
+; BE-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r10, r0
+; BE-NEXT: vmov r0, s24
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r5, r0
+; BE-NEXT: vmov r0, s26
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r7, r0
+; BE-NEXT: vmov r0, s20
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r6, r0
+; BE-NEXT: vmov r0, s22
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r4, r0
+; BE-NEXT: vmov r0, s18
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov s0, r4
+; BE-NEXT: mov r11, r1
+; BE-NEXT: vmov.32 d9[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov s0, r6
+; BE-NEXT: mov r8, r1
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov s0, r7
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov s0, r5
+; BE-NEXT: mov r7, r1
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov s0, r10
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: vmov r0, s16
+; BE-NEXT: mov r4, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: vmov.32 d8[0], r9
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; BE-NEXT: vmov.32 d13[1], r5
+; BE-NEXT: vmov.32 d8[1], r0
+; BE-NEXT: vmov.32 d11[1], r6
+; BE-NEXT: vmov.32 d9[1], r11
+; BE-NEXT: vmov.32 d14[1], r4
+; BE-NEXT: vmov.32 d12[1], r7
+; BE-NEXT: vmov.32 d10[1], r8
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d1, d8
+; BE-NEXT: vrev64.32 d3, d13
+; BE-NEXT: vrev64.32 d5, d11
+; BE-NEXT: vrev64.32 d7, d9
+; BE-NEXT: vrev64.32 d0, d14
+; BE-NEXT: vrev64.32 d2, d12
+; BE-NEXT: vrev64.32 d4, d10
+; BE-NEXT: vrev64.32 d6, d16
+; BE-NEXT: add sp, sp, #8
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14}
+; BE-NEXT: add sp, sp, #4
+; BE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>)
+
+define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
+; LE-LABEL: llrint_v16i64_v16f16:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: .pad #4
+; LE-NEXT: sub sp, sp, #4
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: .pad #120
+; LE-NEXT: sub sp, sp, #120
+; LE-NEXT: mov r11, r0
+; LE-NEXT: vmov r0, s7
+; LE-NEXT: vstr s15, [sp, #24] @ 4-byte Spill
+; LE-NEXT: vmov.f32 s23, s13
+; LE-NEXT: vstr s14, [sp, #100] @ 4-byte Spill
+; LE-NEXT: vmov.f32 s25, s12
+; LE-NEXT: vmov.f32 s27, s11
+; LE-NEXT: vstr s10, [sp, #104] @ 4-byte Spill
+; LE-NEXT: vstr s9, [sp, #108] @ 4-byte Spill
+; LE-NEXT: vmov.f32 s24, s8
+; LE-NEXT: vmov.f32 s19, s6
+; LE-NEXT: vmov.f32 s29, s5
+; LE-NEXT: vmov.f32 s17, s4
+; LE-NEXT: vmov.f32 s16, s3
+; LE-NEXT: vmov.f32 s21, s2
+; LE-NEXT: vmov.f32 s26, s1
+; LE-NEXT: vmov.f32 s18, s0
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: mov r7, r0
+; LE-NEXT: vmov r0, s25
+; LE-NEXT: str r1, [sp, #56] @ 4-byte Spill
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: mov r5, r0
+; LE-NEXT: vmov r0, s27
+; LE-NEXT: str r1, [sp, #116] @ 4-byte Spill
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: mov r6, r0
+; LE-NEXT: vmov r0, s29
+; LE-NEXT: str r1, [sp, #112] @ 4-byte Spill
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: vmov r0, s23
+; LE-NEXT: mov r4, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: add lr, sp, #80
+; LE-NEXT: vmov.32 d17[0], r6
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: mov r6, r0
+; LE-NEXT: vmov r0, s17
+; LE-NEXT: vmov r8, s21
+; LE-NEXT: str r1, [sp, #76] @ 4-byte Spill
+; LE-NEXT: vmov r10, s19
+; LE-NEXT: vmov.32 d10[0], r5
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: add lr, sp, #40
+; LE-NEXT: vmov.32 d11[0], r6
+; LE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: mov r0, r10
+; LE-NEXT: mov r9, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: vmov.32 d11[0], r7
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: mov r0, r8
+; LE-NEXT: mov r7, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: mov r6, r0
+; LE-NEXT: ldr r0, [sp, #56] @ 4-byte Reload
+; LE-NEXT: vmov.32 d11[1], r0
+; LE-NEXT: vmov r0, s18
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: mov r5, r0
+; LE-NEXT: vmov r0, s16
+; LE-NEXT: vmov.32 d10[1], r7
+; LE-NEXT: add lr, sp, #56
+; LE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov s0, r0
+; LE-NEXT: vmov.32 d15[1], r4
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: vmov r0, s26
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: vmov r8, s24
+; LE-NEXT: vmov.32 d14[1], r9
+; LE-NEXT: mov r10, r1
+; LE-NEXT: vmov s24, r5
+; LE-NEXT: vldr s0, [sp, #24] @ 4-byte Reload
+; LE-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; LE-NEXT: vmov r7, s0
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov.f32 s0, s24
+; LE-NEXT: vmov s22, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s22
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: vmov s24, r6
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: mov r0, r7
+; LE-NEXT: mov r6, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov.f32 s0, s24
+; LE-NEXT: vmov s22, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s22
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: mov r9, r1
+; LE-NEXT: vmov.32 d15[1], r6
+; LE-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: mov r0, r8
+; LE-NEXT: mov r6, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vldr s0, [sp, #100] @ 4-byte Reload
+; LE-NEXT: mov r7, r0
+; LE-NEXT: vmov.32 d14[1], r5
+; LE-NEXT: vmov r0, s0
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vldr s0, [sp, #104] @ 4-byte Reload
+; LE-NEXT: vmov s20, r0
+; LE-NEXT: vmov.32 d13[1], r6
+; LE-NEXT: vmov r4, s0
+; LE-NEXT: vldr s0, [sp, #108] @ 4-byte Reload
+; LE-NEXT: vmov r0, s0
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov.f32 s0, s20
+; LE-NEXT: vmov s16, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s16
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: vmov s18, r7
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: mov r0, r4
+; LE-NEXT: mov r6, r1
+; LE-NEXT: bl __aeabi_h2f
+; LE-NEXT: vmov.f32 s0, s18
+; LE-NEXT: vmov s16, r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s16
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d11[1], r6
+; LE-NEXT: bl llrintf
+; LE-NEXT: add lr, sp, #80
+; LE-NEXT: vmov.32 d10[1], r4
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #40
+; LE-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: vmov.32 d16[0], r0
+; LE-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
+; LE-NEXT: vldmia lr, {d20, d21} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: vmov.32 d19[1], r0
+; LE-NEXT: ldr r0, [sp, #116] @ 4-byte Reload
+; LE-NEXT: vmov.32 d21[1], r10
+; LE-NEXT: vmov.32 d18[1], r0
+; LE-NEXT: ldr r0, [sp, #112] @ 4-byte Reload
+; LE-NEXT: vmov.32 d12[1], r5
+; LE-NEXT: vmov.32 d17[1], r0
+; LE-NEXT: add r0, r11, #64
+; LE-NEXT: vmov.32 d16[1], r1
+; LE-NEXT: vst1.64 {d10, d11}, [r0:128]!
+; LE-NEXT: vst1.64 {d16, d17}, [r0:128]!
+; LE-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; LE-NEXT: vmov.32 d20[1], r9
+; LE-NEXT: vst1.64 {d12, d13}, [r0:128]
+; LE-NEXT: vst1.64 {d14, d15}, [r11:128]!
+; LE-NEXT: vst1.64 {d20, d21}, [r11:128]!
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #56
+; LE-NEXT: vst1.64 {d16, d17}, [r11:128]!
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: vst1.64 {d16, d17}, [r11:128]
+; LE-NEXT: add sp, sp, #120
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: add sp, sp, #4
+; LE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-LABEL: llrint_v16i64_v16f16:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: .pad #4
+; BE-NEXT: sub sp, sp, #4
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: .pad #112
+; BE-NEXT: sub sp, sp, #112
+; BE-NEXT: mov r11, r0
+; BE-NEXT: vmov r0, s14
+; BE-NEXT: vmov.f32 s17, s15
+; BE-NEXT: vstr s13, [sp, #52] @ 4-byte Spill
+; BE-NEXT: vmov.f32 s21, s12
+; BE-NEXT: vstr s10, [sp, #68] @ 4-byte Spill
+; BE-NEXT: vmov.f32 s23, s11
+; BE-NEXT: vstr s7, [sp, #72] @ 4-byte Spill
+; BE-NEXT: vmov.f32 s19, s9
+; BE-NEXT: vstr s4, [sp, #28] @ 4-byte Spill
+; BE-NEXT: vmov.f32 s26, s8
+; BE-NEXT: vmov.f32 s24, s6
+; BE-NEXT: vmov.f32 s18, s5
+; BE-NEXT: vmov.f32 s25, s3
+; BE-NEXT: vmov.f32 s16, s2
+; BE-NEXT: vmov.f32 s27, s1
+; BE-NEXT: vmov.f32 s29, s0
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: mov r8, r0
+; BE-NEXT: vmov r0, s29
+; BE-NEXT: mov r4, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r9, r0
+; BE-NEXT: vmov r0, s27
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r7, r0
+; BE-NEXT: vmov r0, s21
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r6, r0
+; BE-NEXT: vmov r0, s25
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r5, r0
+; BE-NEXT: vmov r0, s23
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov s0, r5
+; BE-NEXT: str r1, [sp, #108] @ 4-byte Spill
+; BE-NEXT: vstr d16, [sp, #96] @ 8-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov s0, r6
+; BE-NEXT: str r1, [sp, #92] @ 4-byte Spill
+; BE-NEXT: vstr d16, [sp, #80] @ 8-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov s0, r7
+; BE-NEXT: str r1, [sp, #76] @ 4-byte Spill
+; BE-NEXT: vstr d16, [sp, #56] @ 8-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov s0, r9
+; BE-NEXT: mov r10, r1
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: vmov r0, s17
+; BE-NEXT: mov r5, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: vmov.32 d10[0], r8
+; BE-NEXT: vmov r6, s19
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: mov r0, r6
+; BE-NEXT: mov r7, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r6, r0
+; BE-NEXT: vmov r0, s18
+; BE-NEXT: vmov.32 d10[1], r4
+; BE-NEXT: vstr d10, [sp, #40] @ 8-byte Spill
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: mov r4, r0
+; BE-NEXT: vmov r0, s16
+; BE-NEXT: vmov.32 d11[1], r7
+; BE-NEXT: vstr d11, [sp, #32] @ 8-byte Spill
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov.32 d15[1], r5
+; BE-NEXT: vmov s0, r0
+; BE-NEXT: vstr d15, [sp, #16] @ 8-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vldr s0, [sp, #28] @ 4-byte Reload
+; BE-NEXT: vmov r5, s26
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov s26, r4
+; BE-NEXT: vmov r0, s0
+; BE-NEXT: mov r8, r1
+; BE-NEXT: vmov.32 d14[1], r10
+; BE-NEXT: vmov r4, s24
+; BE-NEXT: vstr d16, [sp] @ 8-byte Spill
+; BE-NEXT: vstr d14, [sp, #8] @ 8-byte Spill
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov.f32 s0, s26
+; BE-NEXT: vmov s22, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s22
+; BE-NEXT: mov r7, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: vmov s24, r6
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: mov r0, r4
+; BE-NEXT: mov r6, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov.f32 s0, s24
+; BE-NEXT: vmov s22, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s22
+; BE-NEXT: mov r9, r1
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: vmov.32 d14[1], r6
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: mov r0, r5
+; BE-NEXT: mov r6, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vldr s0, [sp, #52] @ 4-byte Reload
+; BE-NEXT: mov r4, r0
+; BE-NEXT: vmov.32 d13[1], r7
+; BE-NEXT: vmov r0, s0
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vldr s0, [sp, #68] @ 4-byte Reload
+; BE-NEXT: vmov s20, r0
+; BE-NEXT: vmov.32 d11[1], r6
+; BE-NEXT: vmov r7, s0
+; BE-NEXT: vldr s0, [sp, #72] @ 4-byte Reload
+; BE-NEXT: vmov r0, s0
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov.f32 s0, s20
+; BE-NEXT: vmov s16, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: vmov s18, r4
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: mov r0, r7
+; BE-NEXT: mov r4, r1
+; BE-NEXT: bl __aeabi_h2f
+; BE-NEXT: vmov.f32 s0, s18
+; BE-NEXT: vmov s16, r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d9[0], r0
+; BE-NEXT: vmov.32 d15[1], r4
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d24[0], r0
+; BE-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
+; BE-NEXT: vldr d23, [sp, #56] @ 8-byte Reload
+; BE-NEXT: vldr d20, [sp, #8] @ 8-byte Reload
+; BE-NEXT: vmov.32 d23[1], r0
+; BE-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; BE-NEXT: vldr d22, [sp, #80] @ 8-byte Reload
+; BE-NEXT: vldr d26, [sp, #16] @ 8-byte Reload
+; BE-NEXT: vrev64.32 d21, d20
+; BE-NEXT: vmov.32 d22[1], r0
+; BE-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; BE-NEXT: vldr d30, [sp] @ 8-byte Reload
+; BE-NEXT: vldr d25, [sp, #96] @ 8-byte Reload
+; BE-NEXT: vrev64.32 d20, d26
+; BE-NEXT: vldr d26, [sp, #32] @ 8-byte Reload
+; BE-NEXT: vmov.32 d10[1], r5
+; BE-NEXT: vmov.32 d12[1], r9
+; BE-NEXT: vldr d28, [sp, #40] @ 8-byte Reload
+; BE-NEXT: vrev64.32 d27, d26
+; BE-NEXT: vmov.32 d25[1], r0
+; BE-NEXT: add r0, r11, #64
+; BE-NEXT: vmov.32 d30[1], r8
+; BE-NEXT: vmov.32 d9[1], r6
+; BE-NEXT: vrev64.32 d26, d28
+; BE-NEXT: vrev64.32 d29, d10
+; BE-NEXT: vmov.32 d24[1], r1
+; BE-NEXT: vrev64.32 d1, d12
+; BE-NEXT: vrev64.32 d28, d23
+; BE-NEXT: vrev64.32 d23, d22
+; BE-NEXT: vrev64.32 d22, d30
+; BE-NEXT: vrev64.32 d31, d25
+; BE-NEXT: vrev64.32 d0, d9
+; BE-NEXT: vrev64.32 d30, d24
+; BE-NEXT: vst1.64 {d0, d1}, [r0:128]!
+; BE-NEXT: vst1.64 {d30, d31}, [r0:128]!
+; BE-NEXT: vst1.64 {d28, d29}, [r0:128]!
+; BE-NEXT: vrev64.32 d19, d13
+; BE-NEXT: vst1.64 {d26, d27}, [r0:128]
+; BE-NEXT: vst1.64 {d20, d21}, [r11:128]!
+; BE-NEXT: vrev64.32 d18, d14
+; BE-NEXT: vst1.64 {d22, d23}, [r11:128]!
+; BE-NEXT: vrev64.32 d17, d15
+; BE-NEXT: vrev64.32 d16, d11
+; BE-NEXT: vst1.64 {d18, d19}, [r11:128]!
+; BE-NEXT: vst1.64 {d16, d17}, [r11:128]
+; BE-NEXT: add sp, sp, #112
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: add sp, sp, #4
+; BE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x)
+ ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>)
+
+define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
+; LE-LABEL: llrint_v1i64_v1f32:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r11, lr}
+; LE-NEXT: push {r11, lr}
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d0[0], r0
+; LE-NEXT: vmov.32 d0[1], r1
+; LE-NEXT: pop {r11, pc}
+;
+; BE-LABEL: llrint_v1i64_v1f32:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r11, lr}
+; BE-NEXT: push {r11, lr}
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d0, d16
+; BE-NEXT: pop {r11, pc}
+ %a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
+
+define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
+; LE-LABEL: llrint_v2i64_v2f32:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, lr}
+; LE-NEXT: push {r4, lr}
+; LE-NEXT: .vsave {d10, d11}
+; LE-NEXT: vpush {d10, d11}
+; LE-NEXT: .vsave {d8}
+; LE-NEXT: vpush {d8}
+; LE-NEXT: vmov.f64 d8, d0
+; LE-NEXT: vmov.f32 s0, s17
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s16
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: vmov.32 d11[1], r4
+; LE-NEXT: vmov.32 d10[1], r1
+; LE-NEXT: vorr q0, q5, q5
+; LE-NEXT: vpop {d8}
+; LE-NEXT: vpop {d10, d11}
+; LE-NEXT: pop {r4, pc}
+;
+; BE-LABEL: llrint_v2i64_v2f32:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, lr}
+; BE-NEXT: push {r4, lr}
+; BE-NEXT: .vsave {d10, d11}
+; BE-NEXT: vpush {d10, d11}
+; BE-NEXT: .vsave {d8}
+; BE-NEXT: vpush {d8}
+; BE-NEXT: vrev64.32 d8, d0
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: vmov.32 d11[1], r4
+; BE-NEXT: vmov.32 d10[1], r1
+; BE-NEXT: vrev64.32 q0, q5
+; BE-NEXT: vpop {d8}
+; BE-NEXT: vpop {d10, d11}
+; BE-NEXT: pop {r4, pc}
+ %a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
+
+define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
+; LE-LABEL: llrint_v4i64_v4f32:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, lr}
+; LE-NEXT: push {r4, r5, r6, lr}
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; LE-NEXT: vorr q5, q0, q0
+; LE-NEXT: vmov.f32 s0, s23
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s20
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s21
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s22
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: vmov.32 d13[1], r6
+; LE-NEXT: vmov.32 d9[1], r4
+; LE-NEXT: vmov.32 d12[1], r5
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vorr q0, q6, q6
+; LE-NEXT: vorr q1, q4, q4
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; LE-NEXT: pop {r4, r5, r6, pc}
+;
+; BE-LABEL: llrint_v4i64_v4f32:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, lr}
+; BE-NEXT: push {r4, r5, r6, lr}
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; BE-NEXT: vrev64.32 d8, d1
+; BE-NEXT: vrev64.32 d9, d0
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s18
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s19
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: vmov.32 d13[1], r6
+; BE-NEXT: vmov.32 d11[1], r4
+; BE-NEXT: vmov.32 d12[1], r5
+; BE-NEXT: vmov.32 d10[1], r1
+; BE-NEXT: vrev64.32 q0, q6
+; BE-NEXT: vrev64.32 q1, q5
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; BE-NEXT: pop {r4, r5, r6, pc}
+ %a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
+
+define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
+; LE-LABEL: llrint_v8i64_v8f32:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: .pad #40
+; LE-NEXT: sub sp, sp, #40
+; LE-NEXT: vorr q6, q1, q1
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: vorr q7, q0, q0
+; LE-NEXT: vstmia lr, {d2, d3} @ 16-byte Spill
+; LE-NEXT: vmov.f32 s0, s27
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s24
+; LE-NEXT: mov r8, r1
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s25
+; LE-NEXT: mov r9, r1
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vorr q6, q7, q7
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: mov r10, r1
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: vmov.f32 s0, s26
+; LE-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s27
+; LE-NEXT: mov r7, r1
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s24
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-NEXT: vmov.f32 s0, s1
+; LE-NEXT: bl llrintf
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-NEXT: vmov.f32 s0, s2
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: vmov.32 d13[1], r6
+; LE-NEXT: vmov.32 d15[1], r4
+; LE-NEXT: vmov.32 d11[1], r10
+; LE-NEXT: vmov.32 d9[1], r8
+; LE-NEXT: vmov.32 d12[1], r5
+; LE-NEXT: vmov.32 d14[1], r7
+; LE-NEXT: vorr q0, q6, q6
+; LE-NEXT: vmov.32 d10[1], r9
+; LE-NEXT: vorr q1, q7, q7
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vorr q2, q5, q5
+; LE-NEXT: vorr q3, q4, q4
+; LE-NEXT: add sp, sp, #40
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+;
+; BE-LABEL: llrint_v8i64_v8f32:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: .pad #32
+; BE-NEXT: sub sp, sp, #32
+; BE-NEXT: vorr q4, q1, q1
+; BE-NEXT: add lr, sp, #8
+; BE-NEXT: vorr q5, q0, q0
+; BE-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; BE-NEXT: vrev64.32 d12, d8
+; BE-NEXT: vmov.f32 s0, s25
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s24
+; BE-NEXT: mov r8, r1
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vrev64.32 d0, d11
+; BE-NEXT: mov r9, r1
+; BE-NEXT: vrev64.32 d8, d9
+; BE-NEXT: vorr d9, d0, d0
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: vstr d8, [sp, #24] @ 8-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: mov r10, r1
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: add lr, sp, #8
+; BE-NEXT: vmov.f32 s0, s19
+; BE-NEXT: mov r7, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: vrev64.32 d8, d16
+; BE-NEXT: vstr d8, [sp, #8] @ 8-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vldr d0, [sp, #8] @ 8-byte Reload
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d8[0], r0
+; BE-NEXT: vmov.f32 s0, s1
+; BE-NEXT: bl llrintf
+; BE-NEXT: vldr d0, [sp, #24] @ 8-byte Reload
+; BE-NEXT: mov r6, r1
+; BE-NEXT: @ kill: def $s0 killed $s0 killed $d0
+; BE-NEXT: vmov.32 d9[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: vmov.32 d9[1], r6
+; BE-NEXT: vmov.32 d11[1], r4
+; BE-NEXT: vmov.32 d15[1], r8
+; BE-NEXT: vmov.32 d13[1], r7
+; BE-NEXT: vmov.32 d8[1], r5
+; BE-NEXT: vmov.32 d10[1], r10
+; BE-NEXT: vmov.32 d14[1], r9
+; BE-NEXT: vmov.32 d12[1], r1
+; BE-NEXT: vrev64.32 q0, q4
+; BE-NEXT: vrev64.32 q1, q5
+; BE-NEXT: vrev64.32 q2, q7
+; BE-NEXT: vrev64.32 q3, q6
+; BE-NEXT: add sp, sp, #32
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+ %a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
+
+define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
+; LE-LABEL: llrint_v16i64_v16f32:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: .pad #4
+; LE-NEXT: sub sp, sp, #4
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: .pad #160
+; LE-NEXT: sub sp, sp, #160
+; LE-NEXT: add lr, sp, #112
+; LE-NEXT: vorr q5, q3, q3
+; LE-NEXT: vorr q6, q0, q0
+; LE-NEXT: mov r4, r0
+; LE-NEXT: vstmia lr, {d4, d5} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #48
+; LE-NEXT: vorr q7, q1, q1
+; LE-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; LE-NEXT: vmov.f32 s0, s23
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s24
+; LE-NEXT: add lr, sp, #144
+; LE-NEXT: vmov.32 d17[0], r0
+; LE-NEXT: str r1, [sp, #108] @ 4-byte Spill
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s25
+; LE-NEXT: str r1, [sp, #84] @ 4-byte Spill
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s28
+; LE-NEXT: add lr, sp, #128
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: str r1, [sp, #44] @ 4-byte Spill
+; LE-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s29
+; LE-NEXT: mov r9, r1
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s30
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s31
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: add lr, sp, #112
+; LE-NEXT: mov r7, r1
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: vldmia lr, {d14, d15} @ 16-byte Reload
+; LE-NEXT: vmov.f32 s0, s29
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s22
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: vmov.32 d17[0], r0
+; LE-NEXT: mov r11, r1
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: vmov.32 d13[1], r7
+; LE-NEXT: bl llrintf
+; LE-NEXT: add lr, sp, #144
+; LE-NEXT: vmov.f32 s0, s21
+; LE-NEXT: vmov.32 d12[1], r5
+; LE-NEXT: str r1, [sp, #40] @ 4-byte Spill
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: vmov.32 d16[0], r0
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #88
+; LE-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s20
+; LE-NEXT: mov r10, r1
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: vmov.32 d9[1], r6
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s31
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: mov r8, r1
+; LE-NEXT: vmov.32 d8[1], r9
+; LE-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #64
+; LE-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: add lr, sp, #128
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
+; LE-NEXT: mov r9, r1
+; LE-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #48
+; LE-NEXT: vldmia lr, {d12, d13} @ 16-byte Reload
+; LE-NEXT: vmov.f32 s0, s27
+; LE-NEXT: vmov.32 d11[1], r0
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.f32 s0, s26
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; LE-NEXT: add lr, sp, #128
+; LE-NEXT: mov r7, r1
+; LE-NEXT: vmov.32 d10[1], r0
+; LE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-NEXT: bl llrintf
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: add lr, sp, #144
+; LE-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: vmov.32 d17[1], r0
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #112
+; LE-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; LE-NEXT: vmov.f32 s0, s20
+; LE-NEXT: bl llrintf
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: vmov.f32 s0, s22
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: vmov.32 d16[0], r0
+; LE-NEXT: vmov.32 d17[1], r11
+; LE-NEXT: vorr q6, q8, q8
+; LE-NEXT: bl llrintf
+; LE-NEXT: add lr, sp, #144
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: ldr r0, [sp, #40] @ 4-byte Reload
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #128
+; LE-NEXT: vmov.32 d9[1], r9
+; LE-NEXT: vmov.32 d12[1], r6
+; LE-NEXT: vmov.32 d19[1], r10
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vmov.32 d16[1], r0
+; LE-NEXT: add r0, r4, #64
+; LE-NEXT: vmov.32 d18[1], r8
+; LE-NEXT: vst1.64 {d12, d13}, [r0:128]!
+; LE-NEXT: vst1.64 {d8, d9}, [r0:128]!
+; LE-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; LE-NEXT: vst1.64 {d16, d17}, [r0:128]
+; LE-NEXT: vmov.32 d15[1], r7
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #64
+; LE-NEXT: vmov.32 d14[1], r5
+; LE-NEXT: vst1.64 {d16, d17}, [r4:128]!
+; LE-NEXT: vst1.64 {d14, d15}, [r4:128]!
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #88
+; LE-NEXT: vst1.64 {d16, d17}, [r4:128]!
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: vst1.64 {d16, d17}, [r4:128]
+; LE-NEXT: add sp, sp, #160
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: add sp, sp, #4
+; LE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-LABEL: llrint_v16i64_v16f32:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: .pad #4
+; BE-NEXT: sub sp, sp, #4
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: .pad #144
+; BE-NEXT: sub sp, sp, #144
+; BE-NEXT: vorr q6, q3, q3
+; BE-NEXT: add lr, sp, #112
+; BE-NEXT: vorr q7, q0, q0
+; BE-NEXT: mov r4, r0
+; BE-NEXT: vstmia lr, {d4, d5} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #96
+; BE-NEXT: vrev64.32 d8, d13
+; BE-NEXT: vstmia lr, {d2, d3} @ 16-byte Spill
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: str r1, [sp, #88] @ 4-byte Spill
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vrev64.32 d8, d14
+; BE-NEXT: add lr, sp, #128
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: str r1, [sp, #92] @ 4-byte Spill
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: vrev64.32 d9, d12
+; BE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-NEXT: vstr d9, [sp, #64] @ 8-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s19
+; BE-NEXT: mov r9, r1
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: str r1, [sp, #84] @ 4-byte Spill
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: vrev64.32 d9, d15
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s18
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s19
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vldr d0, [sp, #64] @ 8-byte Reload
+; BE-NEXT: mov r7, r1
+; BE-NEXT: @ kill: def $s0 killed $s0 killed $d0
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: add lr, sp, #40
+; BE-NEXT: str r1, [sp, #60] @ 4-byte Spill
+; BE-NEXT: vmov.32 d15[1], r7
+; BE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #96
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: vrev64.32 d8, d16
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: vmov.32 d14[1], r5
+; BE-NEXT: add lr, sp, #64
+; BE-NEXT: mov r10, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: add lr, sp, #24
+; BE-NEXT: mov r11, r1
+; BE-NEXT: vmov.32 d13[1], r6
+; BE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #96
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: vrev64.32 d8, d17
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: vmov.32 d12[1], r9
+; BE-NEXT: add lr, sp, #96
+; BE-NEXT: mov r8, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: add lr, sp, #8
+; BE-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
+; BE-NEXT: mov r9, r1
+; BE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #112
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #128
+; BE-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; BE-NEXT: vrev64.32 d8, d16
+; BE-NEXT: vmov.32 d11[1], r0
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; BE-NEXT: add lr, sp, #128
+; BE-NEXT: mov r7, r1
+; BE-NEXT: vmov.32 d10[1], r0
+; BE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-NEXT: bl llrintf
+; BE-NEXT: add lr, sp, #112
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #40
+; BE-NEXT: vrev64.32 d8, d17
+; BE-NEXT: vldmia lr, {d12, d13} @ 16-byte Reload
+; BE-NEXT: vmov.f32 s0, s17
+; BE-NEXT: vmov.32 d13[1], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: vmov.f32 s0, s16
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: ldr r0, [sp, #60] @ 4-byte Reload
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d12[1], r0
+; BE-NEXT: bl llrintf
+; BE-NEXT: add lr, sp, #24
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: add r0, r4, #64
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #8
+; BE-NEXT: vmov.32 d17[1], r10
+; BE-NEXT: vmov.32 d16[1], r11
+; BE-NEXT: vorr q12, q8, q8
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #128
+; BE-NEXT: vmov.32 d15[1], r7
+; BE-NEXT: vmov.32 d11[1], r6
+; BE-NEXT: vmov.32 d14[1], r5
+; BE-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #96
+; BE-NEXT: vmov.32 d10[1], r1
+; BE-NEXT: vmov.32 d17[1], r8
+; BE-NEXT: vldmia lr, {d20, d21} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #64
+; BE-NEXT: vmov.32 d16[1], r9
+; BE-NEXT: vrev64.32 q14, q7
+; BE-NEXT: vorr q13, q8, q8
+; BE-NEXT: vrev64.32 q15, q5
+; BE-NEXT: vldmia lr, {d22, d23} @ 16-byte Reload
+; BE-NEXT: vrev64.32 q8, q6
+; BE-NEXT: vst1.64 {d28, d29}, [r0:128]!
+; BE-NEXT: vst1.64 {d30, d31}, [r0:128]!
+; BE-NEXT: vrev64.32 q9, q9
+; BE-NEXT: vrev64.32 q10, q10
+; BE-NEXT: vst1.64 {d16, d17}, [r0:128]!
+; BE-NEXT: vrev64.32 q11, q11
+; BE-NEXT: vrev64.32 q12, q12
+; BE-NEXT: vst1.64 {d18, d19}, [r0:128]
+; BE-NEXT: vst1.64 {d20, d21}, [r4:128]!
+; BE-NEXT: vst1.64 {d22, d23}, [r4:128]!
+; BE-NEXT: vrev64.32 q13, q13
+; BE-NEXT: vst1.64 {d24, d25}, [r4:128]!
+; BE-NEXT: vst1.64 {d26, d27}, [r4:128]
+; BE-NEXT: add sp, sp, #144
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: add sp, sp, #4
+; BE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
+ ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
+
+define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
+; LE-LABEL: llrint_v1i64_v1f64:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r11, lr}
+; LE-NEXT: push {r11, lr}
+; LE-NEXT: bl llrint
+; LE-NEXT: vmov.32 d0[0], r0
+; LE-NEXT: vmov.32 d0[1], r1
+; LE-NEXT: pop {r11, pc}
+;
+; BE-LABEL: llrint_v1i64_v1f64:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r11, lr}
+; BE-NEXT: push {r11, lr}
+; BE-NEXT: bl llrint
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d0, d16
+; BE-NEXT: pop {r11, pc}
+ %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
+
+define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
+; LE-LABEL: llrint_v2i64_v2f64:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, lr}
+; LE-NEXT: push {r4, lr}
+; LE-NEXT: .vsave {d8, d9, d10, d11}
+; LE-NEXT: vpush {d8, d9, d10, d11}
+; LE-NEXT: vorr q4, q0, q0
+; LE-NEXT: vorr d0, d9, d9
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d8, d8
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: vmov.32 d11[1], r4
+; LE-NEXT: vmov.32 d10[1], r1
+; LE-NEXT: vorr q0, q5, q5
+; LE-NEXT: vpop {d8, d9, d10, d11}
+; LE-NEXT: pop {r4, pc}
+;
+; BE-LABEL: llrint_v2i64_v2f64:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, lr}
+; BE-NEXT: push {r4, lr}
+; BE-NEXT: .vsave {d8, d9, d10, d11}
+; BE-NEXT: vpush {d8, d9, d10, d11}
+; BE-NEXT: vorr q4, q0, q0
+; BE-NEXT: vorr d0, d9, d9
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d8, d8
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: vmov.32 d11[1], r4
+; BE-NEXT: vmov.32 d10[1], r1
+; BE-NEXT: vrev64.32 q0, q5
+; BE-NEXT: vpop {d8, d9, d10, d11}
+; BE-NEXT: pop {r4, pc}
+ %a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
+
+define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
+; LE-LABEL: llrint_v4i64_v4f64:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, lr}
+; LE-NEXT: push {r4, r5, r6, lr}
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vorr q5, q1, q1
+; LE-NEXT: vorr q6, q0, q0
+; LE-NEXT: vorr d0, d11, d11
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d12, d12
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d13, d13
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d10, d10
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: vmov.32 d15[1], r6
+; LE-NEXT: vmov.32 d9[1], r4
+; LE-NEXT: vmov.32 d14[1], r5
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vorr q0, q7, q7
+; LE-NEXT: vorr q1, q4, q4
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: pop {r4, r5, r6, pc}
+;
+; BE-LABEL: llrint_v4i64_v4f64:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, lr}
+; BE-NEXT: push {r4, r5, r6, lr}
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: vorr q4, q1, q1
+; BE-NEXT: vorr q5, q0, q0
+; BE-NEXT: vorr d0, d9, d9
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d10, d10
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d11, d11
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d8, d8
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: vmov.32 d15[1], r6
+; BE-NEXT: vmov.32 d13[1], r4
+; BE-NEXT: vmov.32 d14[1], r5
+; BE-NEXT: vmov.32 d12[1], r1
+; BE-NEXT: vrev64.32 q0, q7
+; BE-NEXT: vrev64.32 q1, q6
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: pop {r4, r5, r6, pc}
+ %a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
+
+define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
+; LE-LABEL: llrint_v8i64_v8f64:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: .pad #40
+; LE-NEXT: sub sp, sp, #40
+; LE-NEXT: vorr q4, q0, q0
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: vorr d0, d7, d7
+; LE-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; LE-NEXT: vorr q7, q2, q2
+; LE-NEXT: vorr q6, q1, q1
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d14, d14
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: vmov.32 d17[0], r0
+; LE-NEXT: mov r8, r1
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d15, d15
+; LE-NEXT: mov r9, r1
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d12, d12
+; LE-NEXT: mov r10, r1
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d13, d13
+; LE-NEXT: mov r7, r1
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d8, d8
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d9, d9
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-NEXT: bl llrint
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: vmov.32 d13[1], r6
+; LE-NEXT: vldmia lr, {d6, d7} @ 16-byte Reload
+; LE-NEXT: vmov.32 d15[1], r4
+; LE-NEXT: vmov.32 d11[1], r10
+; LE-NEXT: vmov.32 d6[0], r0
+; LE-NEXT: vmov.32 d12[1], r5
+; LE-NEXT: vmov.32 d14[1], r7
+; LE-NEXT: vorr q0, q6, q6
+; LE-NEXT: vmov.32 d10[1], r9
+; LE-NEXT: vorr q1, q7, q7
+; LE-NEXT: vmov.32 d7[1], r8
+; LE-NEXT: vorr q2, q5, q5
+; LE-NEXT: vmov.32 d6[1], r1
+; LE-NEXT: add sp, sp, #40
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+;
+; BE-LABEL: llrint_v8i64_v8f64:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: .pad #40
+; BE-NEXT: sub sp, sp, #40
+; BE-NEXT: vorr q4, q0, q0
+; BE-NEXT: add lr, sp, #24
+; BE-NEXT: vorr d0, d7, d7
+; BE-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; BE-NEXT: vorr q7, q2, q2
+; BE-NEXT: vorr q6, q1, q1
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d14, d14
+; BE-NEXT: add lr, sp, #8
+; BE-NEXT: vmov.32 d17[0], r0
+; BE-NEXT: mov r8, r1
+; BE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d15, d15
+; BE-NEXT: mov r9, r1
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d12, d12
+; BE-NEXT: mov r10, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d13, d13
+; BE-NEXT: mov r7, r1
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d8, d8
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d9, d9
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: add lr, sp, #24
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-NEXT: bl llrint
+; BE-NEXT: add lr, sp, #8
+; BE-NEXT: vmov.32 d13[1], r6
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: vmov.32 d15[1], r4
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d11[1], r10
+; BE-NEXT: vmov.32 d17[1], r8
+; BE-NEXT: vmov.32 d12[1], r5
+; BE-NEXT: vmov.32 d14[1], r7
+; BE-NEXT: vmov.32 d10[1], r9
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 q0, q6
+; BE-NEXT: vrev64.32 q1, q7
+; BE-NEXT: vrev64.32 q2, q5
+; BE-NEXT: vrev64.32 q3, q8
+; BE-NEXT: add sp, sp, #40
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+ %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
+
+define <16 x i64> @llrint_v16i64_v16f64(<16 x double> %x) {
+; LE-LABEL: llrint_v16i64_v16f64:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: .pad #4
+; LE-NEXT: sub sp, sp, #4
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: .pad #176
+; LE-NEXT: sub sp, sp, #176
+; LE-NEXT: add lr, sp, #40
+; LE-NEXT: str r0, [sp, #140] @ 4-byte Spill
+; LE-NEXT: add r0, sp, #312
+; LE-NEXT: vorr q6, q2, q2
+; LE-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #96
+; LE-NEXT: vorr q7, q1, q1
+; LE-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #144
+; LE-NEXT: vorr d0, d1, d1
+; LE-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-NEXT: add r0, sp, #280
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #80
+; LE-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-NEXT: add r0, sp, #296
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #120
+; LE-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-NEXT: add r0, sp, #328
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #56
+; LE-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d14, d14
+; LE-NEXT: str r1, [sp, #116] @ 4-byte Spill
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d15, d15
+; LE-NEXT: str r1, [sp, #76] @ 4-byte Spill
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d12, d12
+; LE-NEXT: add lr, sp, #160
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: str r1, [sp, #72] @ 4-byte Spill
+; LE-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d13, d13
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: add lr, sp, #40
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-NEXT: vorr d0, d8, d8
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d9, d9
+; LE-NEXT: mov r7, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: add lr, sp, #96
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-NEXT: bl llrint
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: add lr, sp, #40
+; LE-NEXT: mov r10, r1
+; LE-NEXT: vmov.32 d13[1], r5
+; LE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #56
+; LE-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-NEXT: vorr d0, d9, d9
+; LE-NEXT: bl llrint
+; LE-NEXT: vorr d0, d8, d8
+; LE-NEXT: vmov.32 d12[1], r7
+; LE-NEXT: add lr, sp, #96
+; LE-NEXT: mov r9, r1
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; LE-NEXT: bl llrint
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: mov r11, r1
+; LE-NEXT: vmov.32 d15[1], r4
+; LE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #144
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: vorr d0, d17, d17
+; LE-NEXT: bl llrint
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: vmov.32 d14[1], r6
+; LE-NEXT: mov r8, r1
+; LE-NEXT: vmov.32 d17[0], r0
+; LE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #56
+; LE-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #80
+; LE-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; LE-NEXT: vorr d0, d11, d11
+; LE-NEXT: bl llrint
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: add lr, sp, #160
+; LE-NEXT: vorr d0, d10, d10
+; LE-NEXT: ldr r0, [sp, #72] @ 4-byte Reload
+; LE-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-NEXT: mov r6, r1
+; LE-NEXT: vmov.32 d9[1], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
+; LE-NEXT: add lr, sp, #160
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d8[1], r0
+; LE-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-NEXT: add lr, sp, #120
+; LE-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; LE-NEXT: vorr d0, d11, d11
+; LE-NEXT: bl llrint
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: add lr, sp, #40
+; LE-NEXT: vorr d0, d10, d10
+; LE-NEXT: ldr r0, [sp, #116] @ 4-byte Reload
+; LE-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d9[1], r0
+; LE-NEXT: bl llrint
+; LE-NEXT: add lr, sp, #144
+; LE-NEXT: mov r7, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-NEXT: vmov.32 d8[1], r10
+; LE-NEXT: bl llrint
+; LE-NEXT: add lr, sp, #8
+; LE-NEXT: vmov.32 d15[1], r6
+; LE-NEXT: vldmia lr, {d20, d21} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #24
+; LE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #160
+; LE-NEXT: vmov.32 d20[0], r0
+; LE-NEXT: vmov.32 d21[1], r8
+; LE-NEXT: vmov.32 d20[1], r1
+; LE-NEXT: ldr r1, [sp, #140] @ 4-byte Reload
+; LE-NEXT: vmov.32 d13[1], r5
+; LE-NEXT: mov r0, r1
+; LE-NEXT: vst1.64 {d8, d9}, [r0:128]!
+; LE-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #56
+; LE-NEXT: vmov.32 d14[1], r4
+; LE-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; LE-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-NEXT: add lr, sp, #96
+; LE-NEXT: vmov.32 d12[1], r7
+; LE-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; LE-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-NEXT: vmov.32 d17[1], r9
+; LE-NEXT: vst1.64 {d18, d19}, [r0:128]
+; LE-NEXT: add r0, r1, #64
+; LE-NEXT: vst1.64 {d14, d15}, [r0:128]!
+; LE-NEXT: vst1.64 {d12, d13}, [r0:128]!
+; LE-NEXT: vmov.32 d16[1], r11
+; LE-NEXT: vst1.64 {d20, d21}, [r0:128]!
+; LE-NEXT: vst1.64 {d16, d17}, [r0:128]
+; LE-NEXT: add sp, sp, #176
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: add sp, sp, #4
+; LE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-LABEL: llrint_v16i64_v16f64:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: .pad #4
+; BE-NEXT: sub sp, sp, #4
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: .pad #168
+; BE-NEXT: sub sp, sp, #168
+; BE-NEXT: add lr, sp, #64
+; BE-NEXT: str r0, [sp, #132] @ 4-byte Spill
+; BE-NEXT: add r0, sp, #304
+; BE-NEXT: vorr q4, q3, q3
+; BE-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #48
+; BE-NEXT: vorr d0, d1, d1
+; BE-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-NEXT: add r0, sp, #320
+; BE-NEXT: vorr q6, q2, q2
+; BE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #88
+; BE-NEXT: vorr q7, q1, q1
+; BE-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-NEXT: add r0, sp, #272
+; BE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #112
+; BE-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-NEXT: add r0, sp, #288
+; BE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #24
+; BE-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d14, d14
+; BE-NEXT: add lr, sp, #136
+; BE-NEXT: vmov.32 d17[0], r0
+; BE-NEXT: str r1, [sp, #108] @ 4-byte Spill
+; BE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d15, d15
+; BE-NEXT: str r1, [sp, #84] @ 4-byte Spill
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d12, d12
+; BE-NEXT: add lr, sp, #152
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: str r1, [sp, #44] @ 4-byte Spill
+; BE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d13, d13
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d8, d8
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d9, d9
+; BE-NEXT: mov r7, r1
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: add lr, sp, #64
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-NEXT: bl llrint
+; BE-NEXT: add lr, sp, #136
+; BE-NEXT: mov r9, r1
+; BE-NEXT: vmov.32 d13[1], r5
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #24
+; BE-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-NEXT: vorr d0, d9, d9
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d8, d8
+; BE-NEXT: vmov.32 d12[1], r7
+; BE-NEXT: add lr, sp, #64
+; BE-NEXT: mov r10, r1
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; BE-NEXT: bl llrint
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: add lr, sp, #8
+; BE-NEXT: mov r11, r1
+; BE-NEXT: vmov.32 d11[1], r4
+; BE-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #48
+; BE-NEXT: vorr q6, q5, q5
+; BE-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-NEXT: vorr d0, d9, d9
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d8, d8
+; BE-NEXT: vmov.32 d12[1], r6
+; BE-NEXT: add lr, sp, #24
+; BE-NEXT: mov r8, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; BE-NEXT: bl llrint
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: add lr, sp, #48
+; BE-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-NEXT: add lr, sp, #152
+; BE-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #88
+; BE-NEXT: vldmia lr, {d12, d13} @ 16-byte Reload
+; BE-NEXT: vorr d0, d13, d13
+; BE-NEXT: vmov.32 d9[1], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vmov.32 d15[0], r0
+; BE-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; BE-NEXT: vorr d0, d12, d12
+; BE-NEXT: add lr, sp, #152
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d8[1], r0
+; BE-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; BE-NEXT: bl llrint
+; BE-NEXT: add lr, sp, #136
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #112
+; BE-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-NEXT: vorr d0, d9, d9
+; BE-NEXT: vmov.32 d11[1], r0
+; BE-NEXT: bl llrint
+; BE-NEXT: vorr d0, d8, d8
+; BE-NEXT: mov r7, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: vmov.32 d10[1], r9
+; BE-NEXT: bl llrint
+; BE-NEXT: add lr, sp, #8
+; BE-NEXT: vmov.32 d12[0], r0
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #48
+; BE-NEXT: vmov.32 d17[1], r10
+; BE-NEXT: vmov.32 d16[1], r11
+; BE-NEXT: vorr q12, q8, q8
+; BE-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #152
+; BE-NEXT: vmov.32 d17[1], r8
+; BE-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #24
+; BE-NEXT: vmov.32 d13[1], r7
+; BE-NEXT: vmov.32 d16[1], r6
+; BE-NEXT: vldmia lr, {d20, d21} @ 16-byte Reload
+; BE-NEXT: add lr, sp, #64
+; BE-NEXT: vorr q13, q8, q8
+; BE-NEXT: vmov.32 d12[1], r1
+; BE-NEXT: ldr r1, [sp, #132] @ 4-byte Reload
+; BE-NEXT: vrev64.32 q8, q5
+; BE-NEXT: mov r0, r1
+; BE-NEXT: vldmia lr, {d22, d23} @ 16-byte Reload
+; BE-NEXT: vrev64.32 q9, q9
+; BE-NEXT: vrev64.32 q10, q10
+; BE-NEXT: vst1.64 {d16, d17}, [r0:128]!
+; BE-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; BE-NEXT: vrev64.32 q11, q11
+; BE-NEXT: vmov.32 d15[1], r4
+; BE-NEXT: vst1.64 {d20, d21}, [r0:128]!
+; BE-NEXT: vrev64.32 q15, q6
+; BE-NEXT: vmov.32 d14[1], r5
+; BE-NEXT: vrev64.32 q12, q12
+; BE-NEXT: vst1.64 {d22, d23}, [r0:128]
+; BE-NEXT: add r0, r1, #64
+; BE-NEXT: vrev64.32 q13, q13
+; BE-NEXT: vst1.64 {d30, d31}, [r0:128]!
+; BE-NEXT: vst1.64 {d24, d25}, [r0:128]!
+; BE-NEXT: vrev64.32 q14, q7
+; BE-NEXT: vst1.64 {d26, d27}, [r0:128]!
+; BE-NEXT: vst1.64 {d28, d29}, [r0:128]
+; BE-NEXT: add sp, sp, #168
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-NEXT: add sp, sp, #4
+; BE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> %x)
+ ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double>)
+
+define <1 x i64> @llrint_v1i64_v1f128(<1 x fp128> %x) {
+; LE-LABEL: llrint_v1i64_v1f128:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r11, lr}
+; LE-NEXT: push {r11, lr}
+; LE-NEXT: bl llrintl
+; LE-NEXT: vmov.32 d0[0], r0
+; LE-NEXT: vmov.32 d0[1], r1
+; LE-NEXT: pop {r11, pc}
+;
+; BE-LABEL: llrint_v1i64_v1f128:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r11, lr}
+; BE-NEXT: push {r11, lr}
+; BE-NEXT: bl llrintl
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d0, d16
+; BE-NEXT: pop {r11, pc}
+ %a = call <1 x i64> @llvm.llrint.v1i64.v1f128(<1 x fp128> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f128(<1 x fp128>)
+
+define <2 x i64> @llrint_v2i64_v2f128(<2 x fp128> %x) {
+; LE-LABEL: llrint_v2i64_v2f128:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, lr}
+; LE-NEXT: .vsave {d8, d9}
+; LE-NEXT: vpush {d8, d9}
+; LE-NEXT: mov r8, r3
+; LE-NEXT: add r3, sp, #40
+; LE-NEXT: mov r5, r2
+; LE-NEXT: mov r6, r1
+; LE-NEXT: mov r7, r0
+; LE-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: mov r0, r7
+; LE-NEXT: mov r1, r6
+; LE-NEXT: mov r2, r5
+; LE-NEXT: mov r3, r8
+; LE-NEXT: bl llrintl
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: vmov.32 d9[1], r4
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vorr q0, q4, q4
+; LE-NEXT: vpop {d8, d9}
+; LE-NEXT: pop {r4, r5, r6, r7, r8, pc}
+;
+; BE-LABEL: llrint_v2i64_v2f128:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, lr}
+; BE-NEXT: .vsave {d8}
+; BE-NEXT: vpush {d8}
+; BE-NEXT: mov r8, r3
+; BE-NEXT: add r3, sp, #32
+; BE-NEXT: mov r5, r2
+; BE-NEXT: mov r6, r1
+; BE-NEXT: mov r7, r0
+; BE-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d8[0], r0
+; BE-NEXT: mov r0, r7
+; BE-NEXT: mov r1, r6
+; BE-NEXT: mov r2, r5
+; BE-NEXT: mov r3, r8
+; BE-NEXT: bl llrintl
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d8[1], r4
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d1, d8
+; BE-NEXT: vrev64.32 d0, d16
+; BE-NEXT: vpop {d8}
+; BE-NEXT: pop {r4, r5, r6, r7, r8, pc}
+ %a = call <2 x i64> @llvm.llrint.v2i64.v2f128(<2 x fp128> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f128(<2 x fp128>)
+
+define <4 x i64> @llrint_v4i64_v4f128(<4 x fp128> %x) {
+; LE-LABEL: llrint_v4i64_v4f128:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-NEXT: .vsave {d8, d9, d10, d11}
+; LE-NEXT: vpush {d8, d9, d10, d11}
+; LE-NEXT: mov r5, r3
+; LE-NEXT: add r3, sp, #96
+; LE-NEXT: mov r7, r2
+; LE-NEXT: mov r6, r1
+; LE-NEXT: mov r4, r0
+; LE-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: mov r9, r1
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: mov r0, r4
+; LE-NEXT: mov r1, r6
+; LE-NEXT: mov r2, r7
+; LE-NEXT: mov r3, r5
+; LE-NEXT: ldr r8, [sp, #80]
+; LE-NEXT: ldr r10, [sp, #64]
+; LE-NEXT: bl llrintl
+; LE-NEXT: add r3, sp, #68
+; LE-NEXT: mov r5, r1
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: mov r0, r10
+; LE-NEXT: ldm r3, {r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: add r3, sp, #84
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: mov r0, r8
+; LE-NEXT: ldm r3, {r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: vmov.32 d11[1], r4
+; LE-NEXT: vmov.32 d9[1], r9
+; LE-NEXT: vmov.32 d10[1], r5
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vorr q0, q5, q5
+; LE-NEXT: vorr q1, q4, q4
+; LE-NEXT: vpop {d8, d9, d10, d11}
+; LE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+;
+; BE-LABEL: llrint_v4i64_v4f128:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-NEXT: .vsave {d8, d9, d10}
+; BE-NEXT: vpush {d8, d9, d10}
+; BE-NEXT: mov r5, r3
+; BE-NEXT: add r3, sp, #88
+; BE-NEXT: mov r7, r2
+; BE-NEXT: mov r6, r1
+; BE-NEXT: mov r4, r0
+; BE-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: mov r9, r1
+; BE-NEXT: vmov.32 d8[0], r0
+; BE-NEXT: mov r0, r4
+; BE-NEXT: mov r1, r6
+; BE-NEXT: mov r2, r7
+; BE-NEXT: mov r3, r5
+; BE-NEXT: ldr r8, [sp, #72]
+; BE-NEXT: ldr r10, [sp, #56]
+; BE-NEXT: bl llrintl
+; BE-NEXT: add r3, sp, #60
+; BE-NEXT: mov r5, r1
+; BE-NEXT: vmov.32 d9[0], r0
+; BE-NEXT: mov r0, r10
+; BE-NEXT: ldm r3, {r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: add r3, sp, #76
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: mov r0, r8
+; BE-NEXT: ldm r3, {r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: vmov.32 d10[1], r4
+; BE-NEXT: vmov.32 d8[1], r9
+; BE-NEXT: vmov.32 d9[1], r5
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d1, d10
+; BE-NEXT: vrev64.32 d3, d8
+; BE-NEXT: vrev64.32 d0, d9
+; BE-NEXT: vrev64.32 d2, d16
+; BE-NEXT: vpop {d8, d9, d10}
+; BE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+ %a = call <4 x i64> @llvm.llrint.v4i64.v4f128(<4 x fp128> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f128(<4 x fp128>)
+
+define <8 x i64> @llrint_v8i64_v8f128(<8 x fp128> %x) {
+; LE-LABEL: llrint_v8i64_v8f128:
+; LE: @ %bb.0:
+; LE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-NEXT: .pad #4
+; LE-NEXT: sub sp, sp, #4
+; LE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: .pad #8
+; LE-NEXT: sub sp, sp, #8
+; LE-NEXT: mov r11, r3
+; LE-NEXT: add r3, sp, #208
+; LE-NEXT: mov r10, r2
+; LE-NEXT: mov r4, r1
+; LE-NEXT: mov r5, r0
+; LE-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: add r7, sp, #164
+; LE-NEXT: ldr r6, [sp, #160]
+; LE-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; LE-NEXT: vmov.32 d9[0], r0
+; LE-NEXT: ldm r7, {r1, r2, r3, r7}
+; LE-NEXT: mov r0, r6
+; LE-NEXT: ldr r8, [sp, #128]
+; LE-NEXT: ldr r9, [sp, #144]
+; LE-NEXT: bl llrintl
+; LE-NEXT: add r3, sp, #180
+; LE-NEXT: str r1, [sp] @ 4-byte Spill
+; LE-NEXT: vmov.32 d10[0], r0
+; LE-NEXT: mov r0, r7
+; LE-NEXT: ldm r3, {r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: add r3, sp, #132
+; LE-NEXT: mov r7, r1
+; LE-NEXT: vmov.32 d11[0], r0
+; LE-NEXT: mov r0, r8
+; LE-NEXT: ldm r3, {r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: add r3, sp, #148
+; LE-NEXT: mov r8, r1
+; LE-NEXT: vmov.32 d12[0], r0
+; LE-NEXT: mov r0, r9
+; LE-NEXT: ldm r3, {r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: mov r9, r1
+; LE-NEXT: vmov.32 d13[0], r0
+; LE-NEXT: mov r0, r5
+; LE-NEXT: mov r1, r4
+; LE-NEXT: mov r2, r10
+; LE-NEXT: mov r3, r11
+; LE-NEXT: ldr r6, [sp, #112]
+; LE-NEXT: bl llrintl
+; LE-NEXT: add r3, sp, #116
+; LE-NEXT: mov r4, r1
+; LE-NEXT: vmov.32 d14[0], r0
+; LE-NEXT: mov r0, r6
+; LE-NEXT: ldm r3, {r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: add r3, sp, #196
+; LE-NEXT: vmov.32 d15[0], r0
+; LE-NEXT: ldr r0, [sp, #192]
+; LE-NEXT: mov r5, r1
+; LE-NEXT: ldm r3, {r1, r2, r3}
+; LE-NEXT: bl llrintl
+; LE-NEXT: vmov.32 d8[0], r0
+; LE-NEXT: ldr r0, [sp] @ 4-byte Reload
+; LE-NEXT: vmov.32 d11[1], r7
+; LE-NEXT: vmov.32 d10[1], r0
+; LE-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; LE-NEXT: vmov.32 d15[1], r5
+; LE-NEXT: vorr q2, q5, q5
+; LE-NEXT: vmov.32 d13[1], r9
+; LE-NEXT: vmov.32 d9[1], r0
+; LE-NEXT: vmov.32 d14[1], r4
+; LE-NEXT: vmov.32 d12[1], r8
+; LE-NEXT: vorr q0, q7, q7
+; LE-NEXT: vmov.32 d8[1], r1
+; LE-NEXT: vorr q1, q6, q6
+; LE-NEXT: vorr q3, q4, q4
+; LE-NEXT: add sp, sp, #8
+; LE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-NEXT: add sp, sp, #4
+; LE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-LABEL: llrint_v8i64_v8f128:
+; BE: @ %bb.0:
+; BE-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-NEXT: .pad #4
+; BE-NEXT: sub sp, sp, #4
+; BE-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14}
+; BE-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14}
+; BE-NEXT: .pad #16
+; BE-NEXT: sub sp, sp, #16
+; BE-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; BE-NEXT: add r3, sp, #208
+; BE-NEXT: mov r11, r2
+; BE-NEXT: mov r4, r1
+; BE-NEXT: mov r5, r0
+; BE-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: ldr r7, [sp, #176]
+; BE-NEXT: add r3, sp, #180
+; BE-NEXT: str r1, [sp, #12] @ 4-byte Spill
+; BE-NEXT: vmov.32 d8[0], r0
+; BE-NEXT: ldm r3, {r1, r2, r3}
+; BE-NEXT: mov r0, r7
+; BE-NEXT: ldr r6, [sp, #128]
+; BE-NEXT: ldr r8, [sp, #144]
+; BE-NEXT: bl llrintl
+; BE-NEXT: add r3, sp, #132
+; BE-NEXT: str r1, [sp, #8] @ 4-byte Spill
+; BE-NEXT: vmov.32 d9[0], r0
+; BE-NEXT: mov r0, r6
+; BE-NEXT: ldm r3, {r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: add r3, sp, #148
+; BE-NEXT: mov r6, r1
+; BE-NEXT: vmov.32 d10[0], r0
+; BE-NEXT: mov r0, r8
+; BE-NEXT: ldm r3, {r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: add r3, sp, #160
+; BE-NEXT: mov r9, r0
+; BE-NEXT: mov r7, r1
+; BE-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: ldr r3, [sp, #4] @ 4-byte Reload
+; BE-NEXT: mov r8, r1
+; BE-NEXT: vmov.32 d11[0], r0
+; BE-NEXT: mov r0, r5
+; BE-NEXT: mov r1, r4
+; BE-NEXT: mov r2, r11
+; BE-NEXT: ldr r10, [sp, #112]
+; BE-NEXT: vmov.32 d12[0], r9
+; BE-NEXT: bl llrintl
+; BE-NEXT: add r3, sp, #116
+; BE-NEXT: mov r4, r1
+; BE-NEXT: vmov.32 d13[0], r0
+; BE-NEXT: mov r0, r10
+; BE-NEXT: ldm r3, {r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: add r3, sp, #196
+; BE-NEXT: vmov.32 d14[0], r0
+; BE-NEXT: ldr r0, [sp, #192]
+; BE-NEXT: mov r5, r1
+; BE-NEXT: ldm r3, {r1, r2, r3}
+; BE-NEXT: bl llrintl
+; BE-NEXT: vmov.32 d16[0], r0
+; BE-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
+; BE-NEXT: vmov.32 d14[1], r5
+; BE-NEXT: vmov.32 d9[1], r0
+; BE-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
+; BE-NEXT: vmov.32 d12[1], r7
+; BE-NEXT: vmov.32 d8[1], r0
+; BE-NEXT: vmov.32 d13[1], r4
+; BE-NEXT: vmov.32 d10[1], r6
+; BE-NEXT: vmov.32 d11[1], r8
+; BE-NEXT: vmov.32 d16[1], r1
+; BE-NEXT: vrev64.32 d1, d14
+; BE-NEXT: vrev64.32 d3, d12
+; BE-NEXT: vrev64.32 d5, d9
+; BE-NEXT: vrev64.32 d7, d8
+; BE-NEXT: vrev64.32 d0, d13
+; BE-NEXT: vrev64.32 d2, d10
+; BE-NEXT: vrev64.32 d4, d11
+; BE-NEXT: vrev64.32 d6, d16
+; BE-NEXT: add sp, sp, #16
+; BE-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14}
+; BE-NEXT: add sp, sp, #4
+; BE-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <8 x i64> @llvm.llrint.v8i64.v8f128(<8 x fp128> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f128(<8 x fp128>)
diff --git a/llvm/test/CodeGen/ARM/vector-lrint.ll b/llvm/test/CodeGen/ARM/vector-lrint.ll
new file mode 100644
index 0000000..fe5e3cb
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/vector-lrint.ll
@@ -0,0 +1,3265 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=armv7-unknown-none-eabihf | FileCheck %s --check-prefixes=LE-I32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=armv7-unknown-none-eabihf | FileCheck %s --check-prefixes=LE-I64
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=armv7-unknown-none-eabihf -mattr=+neon | FileCheck %s --check-prefixes=LE-I32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=armv7-unknown-none-eabihf -mattr=+neon | FileCheck %s --check-prefixes=LE-I64
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=armebv7-unknown-none-eabihf | FileCheck %s --check-prefixes=BE-I32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=armebv7-unknown-none-eabihf | FileCheck %s --check-prefixes=BE-I64
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=armebv7-unknown-none-eabihf -mattr=+neon | FileCheck %s --check-prefixes=BE-I32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=armebv7-unknown-none-eabihf -mattr=+neon | FileCheck %s --check-prefixes=BE-I64
+
+; FIXME: the commented-out f16 tests below crash in the type legalizer with
+; "Do not know how to soft promote this operator's operand!"
+; define <1 x iXLen> @lrint_v1f16(<1 x half> %x) {
+; %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x)
+; ret <1 x iXLen> %a
+; }
+; declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half>)
+
+; define <2 x iXLen> @lrint_v2f16(<2 x half> %x) {
+; %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half> %x)
+; ret <2 x iXLen> %a
+; }
+; declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half>)
+
+; define <4 x iXLen> @lrint_v4f16(<4 x half> %x) {
+; %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half> %x)
+; ret <4 x iXLen> %a
+; }
+; declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half>)
+
+; define <8 x iXLen> @lrint_v8f16(<8 x half> %x) {
+; %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x)
+; ret <8 x iXLen> %a
+; }
+; declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half>)
+
+; define <16 x iXLen> @lrint_v16f16(<16 x half> %x) {
+; %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x)
+; ret <16 x iXLen> %a
+; }
+; declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>)
+
+define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
+; LE-I32-LABEL: lrint_v1f32:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v1f32:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r11, lr}
+; LE-I64-NEXT: push {r11, lr}
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.32 d0[0], r0
+; LE-I64-NEXT: vmov.32 d0[1], r1
+; LE-I64-NEXT: pop {r11, pc}
+;
+; BE-I32-LABEL: lrint_v1f32:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v1f32:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r11, lr}
+; BE-I64-NEXT: push {r11, lr}
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.32 d16[0], r0
+; BE-I64-NEXT: vmov.32 d16[1], r1
+; BE-I64-NEXT: vrev64.32 d0, d16
+; BE-I64-NEXT: pop {r11, pc}
+ %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x)
+ ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>)
+
+define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
+; LE-I32-LABEL: lrint_v2f32:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: .vsave {d8, d9}
+; LE-I32-NEXT: vpush {d8, d9}
+; LE-I32-NEXT: vmov.f64 d8, d0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s17
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.32 d9[1], r0
+; LE-I32-NEXT: vorr d0, d9, d9
+; LE-I32-NEXT: vpop {d8, d9}
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v2f32:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, lr}
+; LE-I64-NEXT: push {r4, lr}
+; LE-I64-NEXT: .vsave {d10, d11}
+; LE-I64-NEXT: vpush {d10, d11}
+; LE-I64-NEXT: .vsave {d8}
+; LE-I64-NEXT: vpush {d8}
+; LE-I64-NEXT: vmov.f64 d8, d0
+; LE-I64-NEXT: vmov.f32 s0, s17
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s16
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: vmov.32 d11[1], r4
+; LE-I64-NEXT: vmov.32 d10[1], r1
+; LE-I64-NEXT: vorr q0, q5, q5
+; LE-I64-NEXT: vpop {d8}
+; LE-I64-NEXT: vpop {d10, d11}
+; LE-I64-NEXT: pop {r4, pc}
+;
+; BE-I32-LABEL: lrint_v2f32:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: .vsave {d8, d9}
+; BE-I32-NEXT: vpush {d8, d9}
+; BE-I32-NEXT: vrev64.32 d8, d0
+; BE-I32-NEXT: vmov.f32 s0, s16
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s17
+; BE-I32-NEXT: vmov.32 d9[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.32 d9[1], r0
+; BE-I32-NEXT: vrev64.32 d0, d9
+; BE-I32-NEXT: vpop {d8, d9}
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v2f32:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, lr}
+; BE-I64-NEXT: push {r4, lr}
+; BE-I64-NEXT: .vsave {d10, d11}
+; BE-I64-NEXT: vpush {d10, d11}
+; BE-I64-NEXT: .vsave {d8}
+; BE-I64-NEXT: vpush {d8}
+; BE-I64-NEXT: vrev64.32 d8, d0
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: vmov.32 d11[1], r4
+; BE-I64-NEXT: vmov.32 d10[1], r1
+; BE-I64-NEXT: vrev64.32 q0, q5
+; BE-I64-NEXT: vpop {d8}
+; BE-I64-NEXT: vpop {d10, d11}
+; BE-I64-NEXT: pop {r4, pc}
+ %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x)
+ ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>)
+
+define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
+; LE-I32-LABEL: lrint_v4f32:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: .vsave {d8, d9, d10, d11}
+; LE-I32-NEXT: vpush {d8, d9, d10, d11}
+; LE-I32-NEXT: vorr q4, q0, q0
+; LE-I32-NEXT: vmov.f32 s0, s18
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s16
+; LE-I32-NEXT: vmov.32 d11[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s19
+; LE-I32-NEXT: vmov.32 d10[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s17
+; LE-I32-NEXT: vmov.32 d11[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.32 d10[1], r0
+; LE-I32-NEXT: vorr q0, q5, q5
+; LE-I32-NEXT: vpop {d8, d9, d10, d11}
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v4f32:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, lr}
+; LE-I64-NEXT: push {r4, r5, r6, lr}
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; LE-I64-NEXT: vorr q5, q0, q0
+; LE-I64-NEXT: vmov.f32 s0, s23
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s20
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s21
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s22
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: vmov.32 d13[1], r6
+; LE-I64-NEXT: vmov.32 d9[1], r4
+; LE-I64-NEXT: vmov.32 d12[1], r5
+; LE-I64-NEXT: vmov.32 d8[1], r1
+; LE-I64-NEXT: vorr q0, q6, q6
+; LE-I64-NEXT: vorr q1, q4, q4
+; LE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; LE-I64-NEXT: pop {r4, r5, r6, pc}
+;
+; BE-I32-LABEL: lrint_v4f32:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: .vsave {d8, d9, d10, d11}
+; BE-I32-NEXT: vpush {d8, d9, d10, d11}
+; BE-I32-NEXT: vrev64.32 q4, q0
+; BE-I32-NEXT: vmov.f32 s0, s18
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s16
+; BE-I32-NEXT: vmov.32 d11[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s19
+; BE-I32-NEXT: vmov.32 d10[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s17
+; BE-I32-NEXT: vmov.32 d11[1], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.32 d10[1], r0
+; BE-I32-NEXT: vrev64.32 q0, q5
+; BE-I32-NEXT: vpop {d8, d9, d10, d11}
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v4f32:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, lr}
+; BE-I64-NEXT: push {r4, r5, r6, lr}
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; BE-I64-NEXT: vrev64.32 d8, d1
+; BE-I64-NEXT: vrev64.32 d9, d0
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s18
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s19
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: vmov.32 d13[1], r6
+; BE-I64-NEXT: vmov.32 d11[1], r4
+; BE-I64-NEXT: vmov.32 d12[1], r5
+; BE-I64-NEXT: vmov.32 d10[1], r1
+; BE-I64-NEXT: vrev64.32 q0, q6
+; BE-I64-NEXT: vrev64.32 q1, q5
+; BE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; BE-I64-NEXT: pop {r4, r5, r6, pc}
+ %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x)
+ ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>)
+
+define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
+; LE-I32-LABEL: lrint_v8f32:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: vorr q5, q1, q1
+; LE-I32-NEXT: vorr q7, q0, q0
+; LE-I32-NEXT: vmov.f32 s0, s20
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s22
+; LE-I32-NEXT: vmov.32 d8[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s30
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s28
+; LE-I32-NEXT: vmov.32 d13[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s31
+; LE-I32-NEXT: vmov.32 d12[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s29
+; LE-I32-NEXT: vmov.32 d13[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s23
+; LE-I32-NEXT: vmov.32 d12[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s21
+; LE-I32-NEXT: vmov.32 d9[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.32 d8[1], r0
+; LE-I32-NEXT: vorr q0, q6, q6
+; LE-I32-NEXT: vorr q1, q4, q4
+; LE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v8f32:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: .pad #40
+; LE-I64-NEXT: sub sp, sp, #40
+; LE-I64-NEXT: vorr q6, q1, q1
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: vorr q7, q0, q0
+; LE-I64-NEXT: vstmia lr, {d2, d3} @ 16-byte Spill
+; LE-I64-NEXT: vmov.f32 s0, s27
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s24
+; LE-I64-NEXT: mov r8, r1
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s25
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vorr q6, q7, q7
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: mov r10, r1
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: vmov.f32 s0, s26
+; LE-I64-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s27
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s24
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I64-NEXT: vmov.f32 s0, s1
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I64-NEXT: vmov.f32 s0, s2
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: vmov.32 d13[1], r6
+; LE-I64-NEXT: vmov.32 d15[1], r4
+; LE-I64-NEXT: vmov.32 d11[1], r10
+; LE-I64-NEXT: vmov.32 d9[1], r8
+; LE-I64-NEXT: vmov.32 d12[1], r5
+; LE-I64-NEXT: vmov.32 d14[1], r7
+; LE-I64-NEXT: vorr q0, q6, q6
+; LE-I64-NEXT: vmov.32 d10[1], r9
+; LE-I64-NEXT: vorr q1, q7, q7
+; LE-I64-NEXT: vmov.32 d8[1], r1
+; LE-I64-NEXT: vorr q2, q5, q5
+; LE-I64-NEXT: vorr q3, q4, q4
+; LE-I64-NEXT: add sp, sp, #40
+; LE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+;
+; BE-I32-LABEL: lrint_v8f32:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: vrev64.32 q4, q1
+; BE-I32-NEXT: vrev64.32 q5, q0
+; BE-I32-NEXT: vmov.f32 s0, s16
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s20
+; BE-I32-NEXT: vmov.32 d12[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s18
+; BE-I32-NEXT: vmov.32 d14[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s22
+; BE-I32-NEXT: vmov.32 d13[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s19
+; BE-I32-NEXT: vmov.32 d15[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s23
+; BE-I32-NEXT: vmov.32 d13[1], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s21
+; BE-I32-NEXT: vmov.32 d15[1], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s17
+; BE-I32-NEXT: vmov.32 d14[1], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.32 d12[1], r0
+; BE-I32-NEXT: vrev64.32 q0, q7
+; BE-I32-NEXT: vrev64.32 q1, q6
+; BE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v8f32:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: .pad #32
+; BE-I64-NEXT: sub sp, sp, #32
+; BE-I64-NEXT: vorr q4, q1, q1
+; BE-I64-NEXT: add lr, sp, #8
+; BE-I64-NEXT: vorr q5, q0, q0
+; BE-I64-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; BE-I64-NEXT: vrev64.32 d12, d8
+; BE-I64-NEXT: vmov.f32 s0, s25
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s24
+; BE-I64-NEXT: mov r8, r1
+; BE-I64-NEXT: vmov.32 d15[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vrev64.32 d0, d11
+; BE-I64-NEXT: mov r9, r1
+; BE-I64-NEXT: vrev64.32 d8, d9
+; BE-I64-NEXT: vorr d9, d0, d0
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: vstr d8, [sp, #24] @ 8-byte Spill
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: mov r10, r1
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: add lr, sp, #8
+; BE-I64-NEXT: vmov.f32 s0, s19
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: vrev64.32 d8, d16
+; BE-I64-NEXT: vstr d8, [sp, #8] @ 8-byte Spill
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vldr d0, [sp, #8] @ 8-byte Reload
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vmov.32 d8[0], r0
+; BE-I64-NEXT: vmov.f32 s0, s1
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vldr d0, [sp, #24] @ 8-byte Reload
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: @ kill: def $s0 killed $s0 killed $d0
+; BE-I64-NEXT: vmov.32 d9[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: vmov.32 d9[1], r6
+; BE-I64-NEXT: vmov.32 d11[1], r4
+; BE-I64-NEXT: vmov.32 d15[1], r8
+; BE-I64-NEXT: vmov.32 d13[1], r7
+; BE-I64-NEXT: vmov.32 d8[1], r5
+; BE-I64-NEXT: vmov.32 d10[1], r10
+; BE-I64-NEXT: vmov.32 d14[1], r9
+; BE-I64-NEXT: vmov.32 d12[1], r1
+; BE-I64-NEXT: vrev64.32 q0, q4
+; BE-I64-NEXT: vrev64.32 q1, q5
+; BE-I64-NEXT: vrev64.32 q2, q7
+; BE-I64-NEXT: vrev64.32 q3, q6
+; BE-I64-NEXT: add sp, sp, #32
+; BE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+ %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x)
+ ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>)
+
+define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
+; LE-I32-LABEL: lrint_v16f32:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: .pad #80
+; LE-I32-NEXT: sub sp, sp, #80
+; LE-I32-NEXT: vorr q5, q3, q3
+; LE-I32-NEXT: vstmia sp, {d0, d1} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #32
+; LE-I32-NEXT: vorr q6, q2, q2
+; LE-I32-NEXT: vorr q7, q1, q1
+; LE-I32-NEXT: vmov.f32 s0, s20
+; LE-I32-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s22
+; LE-I32-NEXT: vmov.32 d8[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s24
+; LE-I32-NEXT: add lr, sp, #48
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #16
+; LE-I32-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s26
+; LE-I32-NEXT: vmov.32 d8[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: add lr, sp, #64
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-I32-NEXT: vorr q4, q7, q7
+; LE-I32-NEXT: vmov.f32 s0, s16
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s18
+; LE-I32-NEXT: vmov.32 d10[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vldmia sp, {d12, d13} @ 16-byte Reload
+; LE-I32-NEXT: vmov.f32 s0, s26
+; LE-I32-NEXT: vmov.32 d11[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s24
+; LE-I32-NEXT: vmov.32 d15[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s27
+; LE-I32-NEXT: vmov.32 d14[0], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s25
+; LE-I32-NEXT: vmov.32 d15[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s19
+; LE-I32-NEXT: vmov.32 d14[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s17
+; LE-I32-NEXT: vmov.32 d11[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: add lr, sp, #16
+; LE-I32-NEXT: vmov.32 d10[1], r0
+; LE-I32-NEXT: vldmia lr, {d12, d13} @ 16-byte Reload
+; LE-I32-NEXT: vmov.f32 s0, s27
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s25
+; LE-I32-NEXT: add lr, sp, #64
+; LE-I32-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-I32-NEXT: vmov.32 d9[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.32 d8[1], r0
+; LE-I32-NEXT: add lr, sp, #64
+; LE-I32-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #32
+; LE-I32-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-I32-NEXT: vmov.f32 s0, s19
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: vmov.f32 s0, s17
+; LE-I32-NEXT: add lr, sp, #48
+; LE-I32-NEXT: vldmia lr, {d12, d13} @ 16-byte Reload
+; LE-I32-NEXT: vmov.32 d13[1], r0
+; LE-I32-NEXT: bl lrintf
+; LE-I32-NEXT: add lr, sp, #64
+; LE-I32-NEXT: vmov.32 d12[1], r0
+; LE-I32-NEXT: vorr q0, q7, q7
+; LE-I32-NEXT: vldmia lr, {d4, d5} @ 16-byte Reload
+; LE-I32-NEXT: vorr q1, q5, q5
+; LE-I32-NEXT: vorr q3, q6, q6
+; LE-I32-NEXT: add sp, sp, #80
+; LE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v16f32:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I64-NEXT: .pad #4
+; LE-I64-NEXT: sub sp, sp, #4
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: .pad #160
+; LE-I64-NEXT: sub sp, sp, #160
+; LE-I64-NEXT: add lr, sp, #112
+; LE-I64-NEXT: vorr q5, q3, q3
+; LE-I64-NEXT: vorr q6, q0, q0
+; LE-I64-NEXT: mov r4, r0
+; LE-I64-NEXT: vstmia lr, {d4, d5} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #48
+; LE-I64-NEXT: vorr q7, q1, q1
+; LE-I64-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; LE-I64-NEXT: vmov.f32 s0, s23
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s24
+; LE-I64-NEXT: add lr, sp, #144
+; LE-I64-NEXT: vmov.32 d17[0], r0
+; LE-I64-NEXT: str r1, [sp, #108] @ 4-byte Spill
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s25
+; LE-I64-NEXT: str r1, [sp, #84] @ 4-byte Spill
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s28
+; LE-I64-NEXT: add lr, sp, #128
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: str r1, [sp, #44] @ 4-byte Spill
+; LE-I64-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s29
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s30
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s31
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: add lr, sp, #112
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: vldmia lr, {d14, d15} @ 16-byte Reload
+; LE-I64-NEXT: vmov.f32 s0, s29
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s22
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: vmov.32 d17[0], r0
+; LE-I64-NEXT: mov r11, r1
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: vmov.32 d13[1], r7
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: add lr, sp, #144
+; LE-I64-NEXT: vmov.f32 s0, s21
+; LE-I64-NEXT: vmov.32 d12[1], r5
+; LE-I64-NEXT: str r1, [sp, #40] @ 4-byte Spill
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: vmov.32 d16[0], r0
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #88
+; LE-I64-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s20
+; LE-I64-NEXT: mov r10, r1
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: vmov.32 d9[1], r6
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s31
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: mov r8, r1
+; LE-I64-NEXT: vmov.32 d8[1], r9
+; LE-I64-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #64
+; LE-I64-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: add lr, sp, #128
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #48
+; LE-I64-NEXT: vldmia lr, {d12, d13} @ 16-byte Reload
+; LE-I64-NEXT: vmov.f32 s0, s27
+; LE-I64-NEXT: vmov.32 d11[1], r0
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.f32 s0, s26
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; LE-I64-NEXT: add lr, sp, #128
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: vmov.32 d10[1], r0
+; LE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: add lr, sp, #144
+; LE-I64-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: vmov.32 d17[1], r0
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #112
+; LE-I64-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; LE-I64-NEXT: vmov.f32 s0, s20
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: vmov.f32 s0, s22
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: vmov.32 d16[0], r0
+; LE-I64-NEXT: vmov.32 d17[1], r11
+; LE-I64-NEXT: vorr q6, q8, q8
+; LE-I64-NEXT: bl lrintf
+; LE-I64-NEXT: add lr, sp, #144
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #40] @ 4-byte Reload
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #128
+; LE-I64-NEXT: vmov.32 d9[1], r9
+; LE-I64-NEXT: vmov.32 d12[1], r6
+; LE-I64-NEXT: vmov.32 d19[1], r10
+; LE-I64-NEXT: vmov.32 d8[1], r1
+; LE-I64-NEXT: vmov.32 d16[1], r0
+; LE-I64-NEXT: add r0, r4, #64
+; LE-I64-NEXT: vmov.32 d18[1], r8
+; LE-I64-NEXT: vst1.64 {d12, d13}, [r0:128]!
+; LE-I64-NEXT: vst1.64 {d8, d9}, [r0:128]!
+; LE-I64-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; LE-I64-NEXT: vst1.64 {d16, d17}, [r0:128]
+; LE-I64-NEXT: vmov.32 d15[1], r7
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #64
+; LE-I64-NEXT: vmov.32 d14[1], r5
+; LE-I64-NEXT: vst1.64 {d16, d17}, [r4:128]!
+; LE-I64-NEXT: vst1.64 {d14, d15}, [r4:128]!
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #88
+; LE-I64-NEXT: vst1.64 {d16, d17}, [r4:128]!
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: vst1.64 {d16, d17}, [r4:128]
+; LE-I64-NEXT: add sp, sp, #160
+; LE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: add sp, sp, #4
+; LE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-I32-LABEL: lrint_v16f32:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: .pad #96
+; BE-I32-NEXT: sub sp, sp, #96
+; BE-I32-NEXT: vrev64.32 q3, q3
+; BE-I32-NEXT: add lr, sp, #64
+; BE-I32-NEXT: vrev64.32 q4, q0
+; BE-I32-NEXT: vmov.f32 s0, s12
+; BE-I32-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; BE-I32-NEXT: add lr, sp, #32
+; BE-I32-NEXT: vrev64.32 q5, q1
+; BE-I32-NEXT: vrev64.32 q7, q2
+; BE-I32-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s16
+; BE-I32-NEXT: vmov.32 d16[0], r0
+; BE-I32-NEXT: add lr, sp, #80
+; BE-I32-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s18
+; BE-I32-NEXT: vmov.32 d12[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s20
+; BE-I32-NEXT: add lr, sp, #48
+; BE-I32-NEXT: vmov.32 d13[0], r0
+; BE-I32-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; BE-I32-NEXT: add lr, sp, #16
+; BE-I32-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s22
+; BE-I32-NEXT: vmov.32 d8[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s28
+; BE-I32-NEXT: vmov.32 d9[0], r0
+; BE-I32-NEXT: vstmia sp, {d8, d9} @ 16-byte Spill
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: add lr, sp, #64
+; BE-I32-NEXT: vmov.32 d12[0], r0
+; BE-I32-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; BE-I32-NEXT: vmov.f32 s0, s22
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s30
+; BE-I32-NEXT: add lr, sp, #80
+; BE-I32-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-I32-NEXT: vmov.32 d9[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s23
+; BE-I32-NEXT: vmov.32 d13[0], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s31
+; BE-I32-NEXT: add lr, sp, #80
+; BE-I32-NEXT: vmov.32 d9[1], r0
+; BE-I32-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s29
+; BE-I32-NEXT: vmov.32 d13[1], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: add lr, sp, #16
+; BE-I32-NEXT: vmov.32 d12[1], r0
+; BE-I32-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-I32-NEXT: vmov.f32 s0, s19
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s17
+; BE-I32-NEXT: vldmia sp, {d10, d11} @ 16-byte Reload
+; BE-I32-NEXT: vmov.32 d11[1], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: add lr, sp, #32
+; BE-I32-NEXT: vmov.32 d10[1], r0
+; BE-I32-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-I32-NEXT: vmov.f32 s0, s19
+; BE-I32-NEXT: vorr q7, q5, q5
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: vmov.f32 s0, s17
+; BE-I32-NEXT: add lr, sp, #48
+; BE-I32-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; BE-I32-NEXT: vmov.32 d11[1], r0
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: add lr, sp, #64
+; BE-I32-NEXT: vmov.32 d10[1], r0
+; BE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-I32-NEXT: vmov.f32 s0, s1
+; BE-I32-NEXT: bl lrintf
+; BE-I32-NEXT: add lr, sp, #80
+; BE-I32-NEXT: vrev64.32 q0, q5
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vrev64.32 q1, q7
+; BE-I32-NEXT: vmov.32 d16[1], r0
+; BE-I32-NEXT: vrev64.32 q2, q6
+; BE-I32-NEXT: vrev64.32 q3, q8
+; BE-I32-NEXT: add sp, sp, #96
+; BE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v16f32:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I64-NEXT: .pad #4
+; BE-I64-NEXT: sub sp, sp, #4
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: .pad #144
+; BE-I64-NEXT: sub sp, sp, #144
+; BE-I64-NEXT: vorr q6, q3, q3
+; BE-I64-NEXT: add lr, sp, #112
+; BE-I64-NEXT: vorr q7, q0, q0
+; BE-I64-NEXT: mov r4, r0
+; BE-I64-NEXT: vstmia lr, {d4, d5} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #96
+; BE-I64-NEXT: vrev64.32 d8, d13
+; BE-I64-NEXT: vstmia lr, {d2, d3} @ 16-byte Spill
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: str r1, [sp, #88] @ 4-byte Spill
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vrev64.32 d8, d14
+; BE-I64-NEXT: add lr, sp, #128
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: str r1, [sp, #92] @ 4-byte Spill
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: vrev64.32 d9, d12
+; BE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-I64-NEXT: vstr d9, [sp, #64] @ 8-byte Spill
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s19
+; BE-I64-NEXT: mov r9, r1
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: str r1, [sp, #84] @ 4-byte Spill
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: vrev64.32 d9, d15
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s18
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s19
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vldr d0, [sp, #64] @ 8-byte Reload
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: @ kill: def $s0 killed $s0 killed $d0
+; BE-I64-NEXT: vmov.32 d15[0], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: add lr, sp, #40
+; BE-I64-NEXT: str r1, [sp, #60] @ 4-byte Spill
+; BE-I64-NEXT: vmov.32 d15[1], r7
+; BE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #96
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: vrev64.32 d8, d16
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: vmov.32 d14[1], r5
+; BE-I64-NEXT: add lr, sp, #64
+; BE-I64-NEXT: mov r10, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: add lr, sp, #24
+; BE-I64-NEXT: mov r11, r1
+; BE-I64-NEXT: vmov.32 d13[1], r6
+; BE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #96
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: vrev64.32 d8, d17
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: vmov.32 d12[1], r9
+; BE-I64-NEXT: add lr, sp, #96
+; BE-I64-NEXT: mov r8, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: add lr, sp, #8
+; BE-I64-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
+; BE-I64-NEXT: mov r9, r1
+; BE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #112
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #128
+; BE-I64-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; BE-I64-NEXT: vrev64.32 d8, d16
+; BE-I64-NEXT: vmov.32 d11[1], r0
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: vmov.32 d15[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; BE-I64-NEXT: add lr, sp, #128
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: vmov.32 d10[1], r0
+; BE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: add lr, sp, #112
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #40
+; BE-I64-NEXT: vrev64.32 d8, d17
+; BE-I64-NEXT: vldmia lr, {d12, d13} @ 16-byte Reload
+; BE-I64-NEXT: vmov.f32 s0, s17
+; BE-I64-NEXT: vmov.32 d13[1], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: vmov.f32 s0, s16
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #60] @ 4-byte Reload
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: vmov.32 d12[1], r0
+; BE-I64-NEXT: bl lrintf
+; BE-I64-NEXT: add lr, sp, #24
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: add r0, r4, #64
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #8
+; BE-I64-NEXT: vmov.32 d17[1], r10
+; BE-I64-NEXT: vmov.32 d16[1], r11
+; BE-I64-NEXT: vorr q12, q8, q8
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #128
+; BE-I64-NEXT: vmov.32 d15[1], r7
+; BE-I64-NEXT: vmov.32 d11[1], r6
+; BE-I64-NEXT: vmov.32 d14[1], r5
+; BE-I64-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #96
+; BE-I64-NEXT: vmov.32 d10[1], r1
+; BE-I64-NEXT: vmov.32 d17[1], r8
+; BE-I64-NEXT: vldmia lr, {d20, d21} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #64
+; BE-I64-NEXT: vmov.32 d16[1], r9
+; BE-I64-NEXT: vrev64.32 q14, q7
+; BE-I64-NEXT: vorr q13, q8, q8
+; BE-I64-NEXT: vrev64.32 q15, q5
+; BE-I64-NEXT: vldmia lr, {d22, d23} @ 16-byte Reload
+; BE-I64-NEXT: vrev64.32 q8, q6
+; BE-I64-NEXT: vst1.64 {d28, d29}, [r0:128]!
+; BE-I64-NEXT: vst1.64 {d30, d31}, [r0:128]!
+; BE-I64-NEXT: vrev64.32 q9, q9
+; BE-I64-NEXT: vrev64.32 q10, q10
+; BE-I64-NEXT: vst1.64 {d16, d17}, [r0:128]!
+; BE-I64-NEXT: vrev64.32 q11, q11
+; BE-I64-NEXT: vrev64.32 q12, q12
+; BE-I64-NEXT: vst1.64 {d18, d19}, [r0:128]
+; BE-I64-NEXT: vst1.64 {d20, d21}, [r4:128]!
+; BE-I64-NEXT: vst1.64 {d22, d23}, [r4:128]!
+; BE-I64-NEXT: vrev64.32 q13, q13
+; BE-I64-NEXT: vst1.64 {d24, d25}, [r4:128]!
+; BE-I64-NEXT: vst1.64 {d26, d27}, [r4:128]
+; BE-I64-NEXT: add sp, sp, #144
+; BE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: add sp, sp, #4
+; BE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x)
+ ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>)
+
+define <1 x iXLen> @lrint_v1f64(<1 x double> %x) {
+; LE-I32-LABEL: lrint_v1f64:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v1f64:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r11, lr}
+; LE-I64-NEXT: push {r11, lr}
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vmov.32 d0[0], r0
+; LE-I64-NEXT: vmov.32 d0[1], r1
+; LE-I64-NEXT: pop {r11, pc}
+;
+; BE-I32-LABEL: lrint_v1f64:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v1f64:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r11, lr}
+; BE-I64-NEXT: push {r11, lr}
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vmov.32 d16[0], r0
+; BE-I64-NEXT: vmov.32 d16[1], r1
+; BE-I64-NEXT: vrev64.32 d0, d16
+; BE-I64-NEXT: pop {r11, pc}
+ %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x)
+ ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>)
+
+define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
+; LE-I32-LABEL: lrint_v2f64:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: .vsave {d8, d9, d10}
+; LE-I32-NEXT: vpush {d8, d9, d10}
+; LE-I32-NEXT: vorr q4, q0, q0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d9, d9
+; LE-I32-NEXT: vmov.32 d10[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vmov.32 d10[1], r0
+; LE-I32-NEXT: vorr d0, d10, d10
+; LE-I32-NEXT: vpop {d8, d9, d10}
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v2f64:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, lr}
+; LE-I64-NEXT: push {r4, lr}
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11}
+; LE-I64-NEXT: vorr q4, q0, q0
+; LE-I64-NEXT: vorr d0, d9, d9
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d8, d8
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: vmov.32 d11[1], r4
+; LE-I64-NEXT: vmov.32 d10[1], r1
+; LE-I64-NEXT: vorr q0, q5, q5
+; LE-I64-NEXT: vpop {d8, d9, d10, d11}
+; LE-I64-NEXT: pop {r4, pc}
+;
+; BE-I32-LABEL: lrint_v2f64:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: .vsave {d8, d9, d10}
+; BE-I32-NEXT: vpush {d8, d9, d10}
+; BE-I32-NEXT: vorr q4, q0, q0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d9, d9
+; BE-I32-NEXT: vmov.32 d10[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vmov.32 d10[1], r0
+; BE-I32-NEXT: vrev64.32 d0, d10
+; BE-I32-NEXT: vpop {d8, d9, d10}
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v2f64:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, lr}
+; BE-I64-NEXT: push {r4, lr}
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11}
+; BE-I64-NEXT: vorr q4, q0, q0
+; BE-I64-NEXT: vorr d0, d9, d9
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d8, d8
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: vmov.32 d11[1], r4
+; BE-I64-NEXT: vmov.32 d10[1], r1
+; BE-I64-NEXT: vrev64.32 q0, q5
+; BE-I64-NEXT: vpop {d8, d9, d10, d11}
+; BE-I64-NEXT: pop {r4, pc}
+ %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double> %x)
+ ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>)
+
+define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
+; LE-I32-LABEL: lrint_v4f64:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; LE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; LE-I32-NEXT: vorr q4, q1, q1
+; LE-I32-NEXT: vorr q5, q0, q0
+; LE-I32-NEXT: vorr d0, d8, d8
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d10, d10
+; LE-I32-NEXT: vmov.32 d13[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d9, d9
+; LE-I32-NEXT: vmov.32 d12[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d11, d11
+; LE-I32-NEXT: vmov.32 d13[1], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vmov.32 d12[1], r0
+; LE-I32-NEXT: vorr q0, q6, q6
+; LE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v4f64:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, lr}
+; LE-I64-NEXT: push {r4, r5, r6, lr}
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: vorr q5, q1, q1
+; LE-I64-NEXT: vorr q6, q0, q0
+; LE-I64-NEXT: vorr d0, d11, d11
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d12, d12
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d13, d13
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d10, d10
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: vmov.32 d15[1], r6
+; LE-I64-NEXT: vmov.32 d9[1], r4
+; LE-I64-NEXT: vmov.32 d14[1], r5
+; LE-I64-NEXT: vmov.32 d8[1], r1
+; LE-I64-NEXT: vorr q0, q7, q7
+; LE-I64-NEXT: vorr q1, q4, q4
+; LE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: pop {r4, r5, r6, pc}
+;
+; BE-I32-LABEL: lrint_v4f64:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; BE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; BE-I32-NEXT: vorr q4, q1, q1
+; BE-I32-NEXT: vorr q5, q0, q0
+; BE-I32-NEXT: vorr d0, d8, d8
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d10, d10
+; BE-I32-NEXT: vmov.32 d13[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d9, d9
+; BE-I32-NEXT: vmov.32 d12[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d11, d11
+; BE-I32-NEXT: vmov.32 d13[1], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vmov.32 d12[1], r0
+; BE-I32-NEXT: vrev64.32 q0, q6
+; BE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v4f64:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, lr}
+; BE-I64-NEXT: push {r4, r5, r6, lr}
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: vorr q4, q1, q1
+; BE-I64-NEXT: vorr q5, q0, q0
+; BE-I64-NEXT: vorr d0, d9, d9
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d10, d10
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d11, d11
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d8, d8
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: vmov.32 d15[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: vmov.32 d15[1], r6
+; BE-I64-NEXT: vmov.32 d13[1], r4
+; BE-I64-NEXT: vmov.32 d14[1], r5
+; BE-I64-NEXT: vmov.32 d12[1], r1
+; BE-I64-NEXT: vrev64.32 q0, q7
+; BE-I64-NEXT: vrev64.32 q1, q6
+; BE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: pop {r4, r5, r6, pc}
+ %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x)
+ ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>)
+
+define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
+; LE-I32-LABEL: lrint_v8f64:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: .pad #32
+; LE-I32-NEXT: sub sp, sp, #32
+; LE-I32-NEXT: vorr q5, q0, q0
+; LE-I32-NEXT: add lr, sp, #16
+; LE-I32-NEXT: vorr d0, d4, d4
+; LE-I32-NEXT: vstmia sp, {d6, d7} @ 16-byte Spill
+; LE-I32-NEXT: vorr q7, q3, q3
+; LE-I32-NEXT: vstmia lr, {d4, d5} @ 16-byte Spill
+; LE-I32-NEXT: vorr q6, q1, q1
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d14, d14
+; LE-I32-NEXT: vmov.32 d8[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d12, d12
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d10, d10
+; LE-I32-NEXT: vmov.32 d15[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d13, d13
+; LE-I32-NEXT: vmov.32 d14[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d11, d11
+; LE-I32-NEXT: vmov.32 d15[1], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vldmia sp, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: vmov.32 d14[1], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #16
+; LE-I32-NEXT: vmov.32 d9[1], r0
+; LE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vmov.32 d8[1], r0
+; LE-I32-NEXT: vorr q0, q7, q7
+; LE-I32-NEXT: vorr q1, q4, q4
+; LE-I32-NEXT: add sp, sp, #32
+; LE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v8f64:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: .pad #40
+; LE-I64-NEXT: sub sp, sp, #40
+; LE-I64-NEXT: vorr q4, q0, q0
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: vorr d0, d7, d7
+; LE-I64-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; LE-I64-NEXT: vorr q7, q2, q2
+; LE-I64-NEXT: vorr q6, q1, q1
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d14, d14
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: vmov.32 d17[0], r0
+; LE-I64-NEXT: mov r8, r1
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d15, d15
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d12, d12
+; LE-I64-NEXT: mov r10, r1
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d13, d13
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d8, d8
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d9, d9
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I64-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: vmov.32 d13[1], r6
+; LE-I64-NEXT: vldmia lr, {d6, d7} @ 16-byte Reload
+; LE-I64-NEXT: vmov.32 d15[1], r4
+; LE-I64-NEXT: vmov.32 d11[1], r10
+; LE-I64-NEXT: vmov.32 d6[0], r0
+; LE-I64-NEXT: vmov.32 d12[1], r5
+; LE-I64-NEXT: vmov.32 d14[1], r7
+; LE-I64-NEXT: vorr q0, q6, q6
+; LE-I64-NEXT: vmov.32 d10[1], r9
+; LE-I64-NEXT: vorr q1, q7, q7
+; LE-I64-NEXT: vmov.32 d7[1], r8
+; LE-I64-NEXT: vorr q2, q5, q5
+; LE-I64-NEXT: vmov.32 d6[1], r1
+; LE-I64-NEXT: add sp, sp, #40
+; LE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+;
+; BE-I32-LABEL: lrint_v8f64:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: .pad #32
+; BE-I32-NEXT: sub sp, sp, #32
+; BE-I32-NEXT: vorr q5, q0, q0
+; BE-I32-NEXT: vstmia sp, {d0, d1} @ 16-byte Spill
+; BE-I32-NEXT: vorr d0, d4, d4
+; BE-I32-NEXT: add lr, sp, #16
+; BE-I32-NEXT: vorr q7, q3, q3
+; BE-I32-NEXT: vstmia lr, {d4, d5} @ 16-byte Spill
+; BE-I32-NEXT: vorr q6, q1, q1
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d10, d10
+; BE-I32-NEXT: vmov.32 d8[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d14, d14
+; BE-I32-NEXT: vmov.32 d10[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d12, d12
+; BE-I32-NEXT: vmov.32 d9[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d15, d15
+; BE-I32-NEXT: vmov.32 d11[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d13, d13
+; BE-I32-NEXT: vmov.32 d9[1], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vldmia sp, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: vmov.32 d11[1], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #16
+; BE-I32-NEXT: vmov.32 d10[1], r0
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vmov.32 d8[1], r0
+; BE-I32-NEXT: vrev64.32 q0, q5
+; BE-I32-NEXT: vrev64.32 q1, q4
+; BE-I32-NEXT: add sp, sp, #32
+; BE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v8f64:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: .pad #40
+; BE-I64-NEXT: sub sp, sp, #40
+; BE-I64-NEXT: vorr q4, q0, q0
+; BE-I64-NEXT: add lr, sp, #24
+; BE-I64-NEXT: vorr d0, d7, d7
+; BE-I64-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; BE-I64-NEXT: vorr q7, q2, q2
+; BE-I64-NEXT: vorr q6, q1, q1
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d14, d14
+; BE-I64-NEXT: add lr, sp, #8
+; BE-I64-NEXT: vmov.32 d17[0], r0
+; BE-I64-NEXT: mov r8, r1
+; BE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d15, d15
+; BE-I64-NEXT: mov r9, r1
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d12, d12
+; BE-I64-NEXT: mov r10, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d13, d13
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d8, d8
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d15[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d9, d9
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: add lr, sp, #24
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-I64-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: add lr, sp, #8
+; BE-I64-NEXT: vmov.32 d13[1], r6
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: vmov.32 d15[1], r4
+; BE-I64-NEXT: vmov.32 d16[0], r0
+; BE-I64-NEXT: vmov.32 d11[1], r10
+; BE-I64-NEXT: vmov.32 d17[1], r8
+; BE-I64-NEXT: vmov.32 d12[1], r5
+; BE-I64-NEXT: vmov.32 d14[1], r7
+; BE-I64-NEXT: vmov.32 d10[1], r9
+; BE-I64-NEXT: vmov.32 d16[1], r1
+; BE-I64-NEXT: vrev64.32 q0, q6
+; BE-I64-NEXT: vrev64.32 q1, q7
+; BE-I64-NEXT: vrev64.32 q2, q5
+; BE-I64-NEXT: vrev64.32 q3, q8
+; BE-I64-NEXT: add sp, sp, #40
+; BE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+ %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x)
+ ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double>)
+
+define <16 x iXLen> @lrint_v16f64(<16 x double> %x) {
+; LE-I32-LABEL: lrint_v16f64:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r4, r5, r6, lr}
+; LE-I32-NEXT: push {r4, r5, r6, lr}
+; LE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: .pad #128
+; LE-I32-NEXT: sub sp, sp, #128
+; LE-I32-NEXT: add lr, sp, #80
+; LE-I32-NEXT: add r0, sp, #240
+; LE-I32-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-I32-NEXT: add r0, sp, #208
+; LE-I32-NEXT: vorr q6, q0, q0
+; LE-I32-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #32
+; LE-I32-NEXT: vorr q5, q1, q1
+; LE-I32-NEXT: vstmia lr, {d4, d5} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #16
+; LE-I32-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #64
+; LE-I32-NEXT: vorr d0, d4, d4
+; LE-I32-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #112
+; LE-I32-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-I32-NEXT: add r0, sp, #224
+; LE-I32-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #96
+; LE-I32-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-I32-NEXT: add r0, sp, #256
+; LE-I32-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I32-NEXT: add lr, sp, #48
+; LE-I32-NEXT: vld1.64 {d14, d15}, [r0]
+; LE-I32-NEXT: vstmia sp, {d2, d3} @ 16-byte Spill
+; LE-I32-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d12, d12
+; LE-I32-NEXT: vmov.32 d8[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d10, d10
+; LE-I32-NEXT: vmov.32 d12[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vorr d0, d14, d14
+; LE-I32-NEXT: vmov.32 d13[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #64
+; LE-I32-NEXT: mov r4, r0
+; LE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #80
+; LE-I32-NEXT: vmov.32 d14[0], r0
+; LE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #112
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-I32-NEXT: vmov.32 d15[0], r4
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vldmia sp, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: vmov.32 d10[0], r0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #96
+; LE-I32-NEXT: vmov.32 d13[1], r0
+; LE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #16
+; LE-I32-NEXT: vmov.32 d11[0], r0
+; LE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #80
+; LE-I32-NEXT: vmov.32 d12[1], r0
+; LE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #32
+; LE-I32-NEXT: vmov.32 d9[1], r0
+; LE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #96
+; LE-I32-NEXT: vmov.32 d8[1], r0
+; LE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #112
+; LE-I32-NEXT: vmov.32 d11[1], r0
+; LE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #48
+; LE-I32-NEXT: vmov.32 d10[1], r0
+; LE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: add lr, sp, #64
+; LE-I32-NEXT: vmov.32 d15[1], r0
+; LE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I32-NEXT: vorr d0, d17, d17
+; LE-I32-NEXT: bl lrint
+; LE-I32-NEXT: vmov.32 d14[1], r0
+; LE-I32-NEXT: vorr q0, q6, q6
+; LE-I32-NEXT: vorr q1, q4, q4
+; LE-I32-NEXT: vorr q2, q5, q5
+; LE-I32-NEXT: vorr q3, q7, q7
+; LE-I32-NEXT: add sp, sp, #128
+; LE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: pop {r4, r5, r6, pc}
+;
+; LE-I64-LABEL: lrint_v16f64:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I64-NEXT: .pad #4
+; LE-I64-NEXT: sub sp, sp, #4
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: .pad #176
+; LE-I64-NEXT: sub sp, sp, #176
+; LE-I64-NEXT: add lr, sp, #40
+; LE-I64-NEXT: str r0, [sp, #140] @ 4-byte Spill
+; LE-I64-NEXT: add r0, sp, #312
+; LE-I64-NEXT: vorr q6, q2, q2
+; LE-I64-NEXT: vstmia lr, {d6, d7} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #96
+; LE-I64-NEXT: vorr q7, q1, q1
+; LE-I64-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #144
+; LE-I64-NEXT: vorr d0, d1, d1
+; LE-I64-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-I64-NEXT: add r0, sp, #280
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #80
+; LE-I64-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-I64-NEXT: add r0, sp, #296
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #120
+; LE-I64-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-I64-NEXT: add r0, sp, #328
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #56
+; LE-I64-NEXT: vld1.64 {d16, d17}, [r0]
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d14, d14
+; LE-I64-NEXT: str r1, [sp, #116] @ 4-byte Spill
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d15, d15
+; LE-I64-NEXT: str r1, [sp, #76] @ 4-byte Spill
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d12, d12
+; LE-I64-NEXT: add lr, sp, #160
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: str r1, [sp, #72] @ 4-byte Spill
+; LE-I64-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d13, d13
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: add lr, sp, #40
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-I64-NEXT: vorr d0, d8, d8
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d9, d9
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: add lr, sp, #96
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I64-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: add lr, sp, #40
+; LE-I64-NEXT: mov r10, r1
+; LE-I64-NEXT: vmov.32 d13[1], r5
+; LE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #56
+; LE-I64-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-I64-NEXT: vorr d0, d9, d9
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vorr d0, d8, d8
+; LE-I64-NEXT: vmov.32 d12[1], r7
+; LE-I64-NEXT: add lr, sp, #96
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: mov r11, r1
+; LE-I64-NEXT: vmov.32 d15[1], r4
+; LE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #144
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: vorr d0, d17, d17
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: vmov.32 d14[1], r6
+; LE-I64-NEXT: mov r8, r1
+; LE-I64-NEXT: vmov.32 d17[0], r0
+; LE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #56
+; LE-I64-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #80
+; LE-I64-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; LE-I64-NEXT: vorr d0, d11, d11
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: add lr, sp, #160
+; LE-I64-NEXT: vorr d0, d10, d10
+; LE-I64-NEXT: ldr r0, [sp, #72] @ 4-byte Reload
+; LE-I64-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: vmov.32 d9[1], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
+; LE-I64-NEXT: add lr, sp, #160
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d8[1], r0
+; LE-I64-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #120
+; LE-I64-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; LE-I64-NEXT: vorr d0, d11, d11
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: add lr, sp, #40
+; LE-I64-NEXT: vorr d0, d10, d10
+; LE-I64-NEXT: ldr r0, [sp, #116] @ 4-byte Reload
+; LE-I64-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vmov.32 d9[1], r0
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: add lr, sp, #144
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; LE-I64-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; LE-I64-NEXT: vmov.32 d8[1], r10
+; LE-I64-NEXT: bl lrint
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: vmov.32 d15[1], r6
+; LE-I64-NEXT: vldmia lr, {d20, d21} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #160
+; LE-I64-NEXT: vmov.32 d20[0], r0
+; LE-I64-NEXT: vmov.32 d21[1], r8
+; LE-I64-NEXT: vmov.32 d20[1], r1
+; LE-I64-NEXT: ldr r1, [sp, #140] @ 4-byte Reload
+; LE-I64-NEXT: vmov.32 d13[1], r5
+; LE-I64-NEXT: mov r0, r1
+; LE-I64-NEXT: vst1.64 {d8, d9}, [r0:128]!
+; LE-I64-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #56
+; LE-I64-NEXT: vmov.32 d14[1], r4
+; LE-I64-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; LE-I64-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #96
+; LE-I64-NEXT: vmov.32 d12[1], r7
+; LE-I64-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; LE-I64-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-I64-NEXT: vmov.32 d17[1], r9
+; LE-I64-NEXT: vst1.64 {d18, d19}, [r0:128]
+; LE-I64-NEXT: add r0, r1, #64
+; LE-I64-NEXT: vst1.64 {d14, d15}, [r0:128]!
+; LE-I64-NEXT: vst1.64 {d12, d13}, [r0:128]!
+; LE-I64-NEXT: vmov.32 d16[1], r11
+; LE-I64-NEXT: vst1.64 {d20, d21}, [r0:128]!
+; LE-I64-NEXT: vst1.64 {d16, d17}, [r0:128]
+; LE-I64-NEXT: add sp, sp, #176
+; LE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: add sp, sp, #4
+; LE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-I32-LABEL: lrint_v16f64:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r4, r5, r6, lr}
+; BE-I32-NEXT: push {r4, r5, r6, lr}
+; BE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: .pad #128
+; BE-I32-NEXT: sub sp, sp, #128
+; BE-I32-NEXT: add lr, sp, #64
+; BE-I32-NEXT: add r0, sp, #240
+; BE-I32-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-I32-NEXT: add r0, sp, #224
+; BE-I32-NEXT: vorr q6, q3, q3
+; BE-I32-NEXT: vstmia lr, {d4, d5} @ 16-byte Spill
+; BE-I32-NEXT: add lr, sp, #16
+; BE-I32-NEXT: vorr q5, q1, q1
+; BE-I32-NEXT: vstmia lr, {d2, d3} @ 16-byte Spill
+; BE-I32-NEXT: add lr, sp, #32
+; BE-I32-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; BE-I32-NEXT: add lr, sp, #80
+; BE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-I32-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I32-NEXT: add lr, sp, #112
+; BE-I32-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-I32-NEXT: add r0, sp, #256
+; BE-I32-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I32-NEXT: add lr, sp, #96
+; BE-I32-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-I32-NEXT: add r0, sp, #208
+; BE-I32-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I32-NEXT: add lr, sp, #48
+; BE-I32-NEXT: vld1.64 {d14, d15}, [r0]
+; BE-I32-NEXT: vstmia sp, {d6, d7} @ 16-byte Spill
+; BE-I32-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d10, d10
+; BE-I32-NEXT: vmov.32 d8[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d12, d12
+; BE-I32-NEXT: vmov.32 d9[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vorr d0, d14, d14
+; BE-I32-NEXT: vmov.32 d11[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #80
+; BE-I32-NEXT: mov r4, r0
+; BE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #64
+; BE-I32-NEXT: vmov.32 d12[0], r0
+; BE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #112
+; BE-I32-NEXT: vmov.32 d10[0], r0
+; BE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-I32-NEXT: vmov.32 d14[0], r4
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vldmia sp, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: vmov.32 d15[0], r0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #96
+; BE-I32-NEXT: vmov.32 d11[1], r0
+; BE-I32-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-I32-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #64
+; BE-I32-NEXT: vmov.32 d13[0], r0
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #16
+; BE-I32-NEXT: vmov.32 d10[1], r0
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #32
+; BE-I32-NEXT: vmov.32 d9[1], r0
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #96
+; BE-I32-NEXT: vmov.32 d8[1], r0
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #112
+; BE-I32-NEXT: vmov.32 d13[1], r0
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #48
+; BE-I32-NEXT: vmov.32 d15[1], r0
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: add lr, sp, #80
+; BE-I32-NEXT: vmov.32 d14[1], r0
+; BE-I32-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I32-NEXT: vorr d0, d17, d17
+; BE-I32-NEXT: bl lrint
+; BE-I32-NEXT: vmov.32 d12[1], r0
+; BE-I32-NEXT: vrev64.32 q0, q4
+; BE-I32-NEXT: vrev64.32 q1, q5
+; BE-I32-NEXT: vrev64.32 q2, q7
+; BE-I32-NEXT: vrev64.32 q3, q6
+; BE-I32-NEXT: add sp, sp, #128
+; BE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: pop {r4, r5, r6, pc}
+;
+; BE-I64-LABEL: lrint_v16f64:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I64-NEXT: .pad #4
+; BE-I64-NEXT: sub sp, sp, #4
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: .pad #168
+; BE-I64-NEXT: sub sp, sp, #168
+; BE-I64-NEXT: add lr, sp, #64
+; BE-I64-NEXT: str r0, [sp, #132] @ 4-byte Spill
+; BE-I64-NEXT: add r0, sp, #304
+; BE-I64-NEXT: vorr q4, q3, q3
+; BE-I64-NEXT: vstmia lr, {d0, d1} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #48
+; BE-I64-NEXT: vorr d0, d1, d1
+; BE-I64-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-I64-NEXT: add r0, sp, #320
+; BE-I64-NEXT: vorr q6, q2, q2
+; BE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #88
+; BE-I64-NEXT: vorr q7, q1, q1
+; BE-I64-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-I64-NEXT: add r0, sp, #272
+; BE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #112
+; BE-I64-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-I64-NEXT: add r0, sp, #288
+; BE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #24
+; BE-I64-NEXT: vld1.64 {d16, d17}, [r0]
+; BE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d14, d14
+; BE-I64-NEXT: add lr, sp, #136
+; BE-I64-NEXT: vmov.32 d17[0], r0
+; BE-I64-NEXT: str r1, [sp, #108] @ 4-byte Spill
+; BE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d15, d15
+; BE-I64-NEXT: str r1, [sp, #84] @ 4-byte Spill
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d12, d12
+; BE-I64-NEXT: add lr, sp, #152
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: str r1, [sp, #44] @ 4-byte Spill
+; BE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d13, d13
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d8, d8
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d9, d9
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: add lr, sp, #64
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: vldmia lr, {d0, d1} @ 16-byte Reload
+; BE-I64-NEXT: @ kill: def $d0 killed $d0 killed $q0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: add lr, sp, #136
+; BE-I64-NEXT: mov r9, r1
+; BE-I64-NEXT: vmov.32 d13[1], r5
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: vmov.32 d16[0], r0
+; BE-I64-NEXT: vstmia lr, {d16, d17} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #24
+; BE-I64-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-I64-NEXT: vorr d0, d9, d9
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d8, d8
+; BE-I64-NEXT: vmov.32 d12[1], r7
+; BE-I64-NEXT: add lr, sp, #64
+; BE-I64-NEXT: mov r10, r1
+; BE-I64-NEXT: vmov.32 d15[0], r0
+; BE-I64-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: add lr, sp, #8
+; BE-I64-NEXT: mov r11, r1
+; BE-I64-NEXT: vmov.32 d11[1], r4
+; BE-I64-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #48
+; BE-I64-NEXT: vorr q6, q5, q5
+; BE-I64-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-I64-NEXT: vorr d0, d9, d9
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d8, d8
+; BE-I64-NEXT: vmov.32 d12[1], r6
+; BE-I64-NEXT: add lr, sp, #24
+; BE-I64-NEXT: mov r8, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: vstmia lr, {d12, d13} @ 16-byte Spill
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: add lr, sp, #48
+; BE-I64-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; BE-I64-NEXT: add lr, sp, #152
+; BE-I64-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #88
+; BE-I64-NEXT: vldmia lr, {d12, d13} @ 16-byte Reload
+; BE-I64-NEXT: vorr d0, d13, d13
+; BE-I64-NEXT: vmov.32 d9[1], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vmov.32 d15[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; BE-I64-NEXT: vorr d0, d12, d12
+; BE-I64-NEXT: add lr, sp, #152
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d8[1], r0
+; BE-I64-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: add lr, sp, #136
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vldmia lr, {d10, d11} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #112
+; BE-I64-NEXT: vldmia lr, {d8, d9} @ 16-byte Reload
+; BE-I64-NEXT: vorr d0, d9, d9
+; BE-I64-NEXT: vmov.32 d11[1], r0
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: vorr d0, d8, d8
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: vmov.32 d10[1], r9
+; BE-I64-NEXT: bl lrint
+; BE-I64-NEXT: add lr, sp, #8
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #48
+; BE-I64-NEXT: vmov.32 d17[1], r10
+; BE-I64-NEXT: vmov.32 d16[1], r11
+; BE-I64-NEXT: vorr q12, q8, q8
+; BE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #152
+; BE-I64-NEXT: vmov.32 d17[1], r8
+; BE-I64-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #24
+; BE-I64-NEXT: vmov.32 d13[1], r7
+; BE-I64-NEXT: vmov.32 d16[1], r6
+; BE-I64-NEXT: vldmia lr, {d20, d21} @ 16-byte Reload
+; BE-I64-NEXT: add lr, sp, #64
+; BE-I64-NEXT: vorr q13, q8, q8
+; BE-I64-NEXT: vmov.32 d12[1], r1
+; BE-I64-NEXT: ldr r1, [sp, #132] @ 4-byte Reload
+; BE-I64-NEXT: vrev64.32 q8, q5
+; BE-I64-NEXT: mov r0, r1
+; BE-I64-NEXT: vldmia lr, {d22, d23} @ 16-byte Reload
+; BE-I64-NEXT: vrev64.32 q9, q9
+; BE-I64-NEXT: vrev64.32 q10, q10
+; BE-I64-NEXT: vst1.64 {d16, d17}, [r0:128]!
+; BE-I64-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; BE-I64-NEXT: vrev64.32 q11, q11
+; BE-I64-NEXT: vmov.32 d15[1], r4
+; BE-I64-NEXT: vst1.64 {d20, d21}, [r0:128]!
+; BE-I64-NEXT: vrev64.32 q15, q6
+; BE-I64-NEXT: vmov.32 d14[1], r5
+; BE-I64-NEXT: vrev64.32 q12, q12
+; BE-I64-NEXT: vst1.64 {d22, d23}, [r0:128]
+; BE-I64-NEXT: add r0, r1, #64
+; BE-I64-NEXT: vrev64.32 q13, q13
+; BE-I64-NEXT: vst1.64 {d30, d31}, [r0:128]!
+; BE-I64-NEXT: vst1.64 {d24, d25}, [r0:128]!
+; BE-I64-NEXT: vrev64.32 q14, q7
+; BE-I64-NEXT: vst1.64 {d26, d27}, [r0:128]!
+; BE-I64-NEXT: vst1.64 {d28, d29}, [r0:128]
+; BE-I64-NEXT: add sp, sp, #168
+; BE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: add sp, sp, #4
+; BE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double> %x)
+ ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double>)
+
+define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) {
+; LE-I32-LABEL: lrint_v1fp128:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r11, lr}
+; LE-I32-NEXT: push {r11, lr}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: pop {r11, pc}
+;
+; LE-I64-LABEL: lrint_v1fp128:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r11, lr}
+; LE-I64-NEXT: push {r11, lr}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: vmov.32 d0[0], r0
+; LE-I64-NEXT: vmov.32 d0[1], r1
+; LE-I64-NEXT: pop {r11, pc}
+;
+; BE-I32-LABEL: lrint_v1fp128:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r11, lr}
+; BE-I32-NEXT: push {r11, lr}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: pop {r11, pc}
+;
+; BE-I64-LABEL: lrint_v1fp128:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r11, lr}
+; BE-I64-NEXT: push {r11, lr}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: vmov.32 d16[0], r0
+; BE-I64-NEXT: vmov.32 d16[1], r1
+; BE-I64-NEXT: vrev64.32 d0, d16
+; BE-I64-NEXT: pop {r11, pc}
+ %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128> %x)
+ ret <1 x iXLen> %a
+}
+declare <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128>)
+
+define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) {
+; LE-I32-LABEL: lrint_v2fp128:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; LE-I32-NEXT: push {r4, r5, r6, r7, r8, lr}
+; LE-I32-NEXT: mov r8, r3
+; LE-I32-NEXT: add r3, sp, #24
+; LE-I32-NEXT: mov r5, r2
+; LE-I32-NEXT: mov r6, r1
+; LE-I32-NEXT: mov r7, r0
+; LE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: mov r4, r0
+; LE-I32-NEXT: mov r0, r7
+; LE-I32-NEXT: mov r1, r6
+; LE-I32-NEXT: mov r2, r5
+; LE-I32-NEXT: mov r3, r8
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: vmov.32 d0[0], r0
+; LE-I32-NEXT: vmov.32 d0[1], r4
+; LE-I32-NEXT: pop {r4, r5, r6, r7, r8, pc}
+;
+; LE-I64-LABEL: lrint_v2fp128:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; LE-I64-NEXT: push {r4, r5, r6, r7, r8, lr}
+; LE-I64-NEXT: .vsave {d8, d9}
+; LE-I64-NEXT: vpush {d8, d9}
+; LE-I64-NEXT: mov r8, r3
+; LE-I64-NEXT: add r3, sp, #40
+; LE-I64-NEXT: mov r5, r2
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: mov r7, r0
+; LE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: mov r0, r7
+; LE-I64-NEXT: mov r1, r6
+; LE-I64-NEXT: mov r2, r5
+; LE-I64-NEXT: mov r3, r8
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: vmov.32 d9[1], r4
+; LE-I64-NEXT: vmov.32 d8[1], r1
+; LE-I64-NEXT: vorr q0, q4, q4
+; LE-I64-NEXT: vpop {d8, d9}
+; LE-I64-NEXT: pop {r4, r5, r6, r7, r8, pc}
+;
+; BE-I32-LABEL: lrint_v2fp128:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; BE-I32-NEXT: push {r4, r5, r6, r7, r8, lr}
+; BE-I32-NEXT: mov r8, r3
+; BE-I32-NEXT: add r3, sp, #24
+; BE-I32-NEXT: mov r5, r2
+; BE-I32-NEXT: mov r6, r1
+; BE-I32-NEXT: mov r7, r0
+; BE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: mov r4, r0
+; BE-I32-NEXT: mov r0, r7
+; BE-I32-NEXT: mov r1, r6
+; BE-I32-NEXT: mov r2, r5
+; BE-I32-NEXT: mov r3, r8
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: vmov.32 d16[0], r0
+; BE-I32-NEXT: vmov.32 d16[1], r4
+; BE-I32-NEXT: vrev64.32 d0, d16
+; BE-I32-NEXT: pop {r4, r5, r6, r7, r8, pc}
+;
+; BE-I64-LABEL: lrint_v2fp128:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; BE-I64-NEXT: push {r4, r5, r6, r7, r8, lr}
+; BE-I64-NEXT: .vsave {d8}
+; BE-I64-NEXT: vpush {d8}
+; BE-I64-NEXT: mov r8, r3
+; BE-I64-NEXT: add r3, sp, #32
+; BE-I64-NEXT: mov r5, r2
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: mov r7, r0
+; BE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d8[0], r0
+; BE-I64-NEXT: mov r0, r7
+; BE-I64-NEXT: mov r1, r6
+; BE-I64-NEXT: mov r2, r5
+; BE-I64-NEXT: mov r3, r8
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: vmov.32 d16[0], r0
+; BE-I64-NEXT: vmov.32 d8[1], r4
+; BE-I64-NEXT: vmov.32 d16[1], r1
+; BE-I64-NEXT: vrev64.32 d1, d8
+; BE-I64-NEXT: vrev64.32 d0, d16
+; BE-I64-NEXT: vpop {d8}
+; BE-I64-NEXT: pop {r4, r5, r6, r7, r8, pc}
+ %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128> %x)
+ ret <2 x iXLen> %a
+}
+declare <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128>)
+
+define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) {
+; LE-I32-LABEL: lrint_v4fp128:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r4, lr}
+; LE-I32-NEXT: push {r4, lr}
+; LE-I32-NEXT: .vsave {d8, d9}
+; LE-I32-NEXT: vpush {d8, d9}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #60
+; LE-I32-NEXT: ldr r12, [sp, #56]
+; LE-I32-NEXT: vmov.32 d8[0], r0
+; LE-I32-NEXT: ldm r3, {r1, r2, r3}
+; LE-I32-NEXT: mov r0, r12
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #40
+; LE-I32-NEXT: mov r4, r0
+; LE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #28
+; LE-I32-NEXT: ldr r12, [sp, #24]
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: ldm r3, {r1, r2, r3}
+; LE-I32-NEXT: mov r0, r12
+; LE-I32-NEXT: vmov.32 d9[1], r4
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: vmov.32 d8[1], r0
+; LE-I32-NEXT: vorr q0, q4, q4
+; LE-I32-NEXT: vpop {d8, d9}
+; LE-I32-NEXT: pop {r4, pc}
+;
+; LE-I64-LABEL: lrint_v4fp128:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11}
+; LE-I64-NEXT: mov r5, r3
+; LE-I64-NEXT: add r3, sp, #96
+; LE-I64-NEXT: mov r7, r2
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: mov r4, r0
+; LE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: mov r0, r4
+; LE-I64-NEXT: mov r1, r6
+; LE-I64-NEXT: mov r2, r7
+; LE-I64-NEXT: mov r3, r5
+; LE-I64-NEXT: ldr r8, [sp, #80]
+; LE-I64-NEXT: ldr r10, [sp, #64]
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #68
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: mov r0, r10
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #84
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: mov r0, r8
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: vmov.32 d11[1], r4
+; LE-I64-NEXT: vmov.32 d9[1], r9
+; LE-I64-NEXT: vmov.32 d10[1], r5
+; LE-I64-NEXT: vmov.32 d8[1], r1
+; LE-I64-NEXT: vorr q0, q5, q5
+; LE-I64-NEXT: vorr q1, q4, q4
+; LE-I64-NEXT: vpop {d8, d9, d10, d11}
+; LE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+;
+; BE-I32-LABEL: lrint_v4fp128:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r4, lr}
+; BE-I32-NEXT: push {r4, lr}
+; BE-I32-NEXT: .vsave {d8, d9}
+; BE-I32-NEXT: vpush {d8, d9}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #60
+; BE-I32-NEXT: ldr r12, [sp, #56]
+; BE-I32-NEXT: vmov.32 d8[0], r0
+; BE-I32-NEXT: ldm r3, {r1, r2, r3}
+; BE-I32-NEXT: mov r0, r12
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #40
+; BE-I32-NEXT: mov r4, r0
+; BE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #28
+; BE-I32-NEXT: ldr r12, [sp, #24]
+; BE-I32-NEXT: vmov.32 d9[0], r0
+; BE-I32-NEXT: ldm r3, {r1, r2, r3}
+; BE-I32-NEXT: mov r0, r12
+; BE-I32-NEXT: vmov.32 d9[1], r4
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: vmov.32 d8[1], r0
+; BE-I32-NEXT: vrev64.32 q0, q4
+; BE-I32-NEXT: vpop {d8, d9}
+; BE-I32-NEXT: pop {r4, pc}
+;
+; BE-I64-LABEL: lrint_v4fp128:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; BE-I64-NEXT: .vsave {d8, d9, d10}
+; BE-I64-NEXT: vpush {d8, d9, d10}
+; BE-I64-NEXT: mov r5, r3
+; BE-I64-NEXT: add r3, sp, #88
+; BE-I64-NEXT: mov r7, r2
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: mov r4, r0
+; BE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: mov r9, r1
+; BE-I64-NEXT: vmov.32 d8[0], r0
+; BE-I64-NEXT: mov r0, r4
+; BE-I64-NEXT: mov r1, r6
+; BE-I64-NEXT: mov r2, r7
+; BE-I64-NEXT: mov r3, r5
+; BE-I64-NEXT: ldr r8, [sp, #72]
+; BE-I64-NEXT: ldr r10, [sp, #56]
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #60
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: vmov.32 d9[0], r0
+; BE-I64-NEXT: mov r0, r10
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #76
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: mov r0, r8
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: vmov.32 d16[0], r0
+; BE-I64-NEXT: vmov.32 d10[1], r4
+; BE-I64-NEXT: vmov.32 d8[1], r9
+; BE-I64-NEXT: vmov.32 d9[1], r5
+; BE-I64-NEXT: vmov.32 d16[1], r1
+; BE-I64-NEXT: vrev64.32 d1, d10
+; BE-I64-NEXT: vrev64.32 d3, d8
+; BE-I64-NEXT: vrev64.32 d0, d9
+; BE-I64-NEXT: vrev64.32 d2, d16
+; BE-I64-NEXT: vpop {d8, d9, d10}
+; BE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+ %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128> %x)
+ ret <4 x iXLen> %a
+}
+declare <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128>)
+
+define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) {
+; LE-I32-LABEL: lrint_v8fp128:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-I32-NEXT: push {r4, r5, r6, r7, r8, r9, r10, lr}
+; LE-I32-NEXT: .vsave {d8, d9, d10, d11}
+; LE-I32-NEXT: vpush {d8, d9, d10, d11}
+; LE-I32-NEXT: mov r6, r3
+; LE-I32-NEXT: add r3, sp, #112
+; LE-I32-NEXT: mov r7, r2
+; LE-I32-NEXT: mov r4, r1
+; LE-I32-NEXT: mov r5, r0
+; LE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: vmov.32 d8[0], r0
+; LE-I32-NEXT: mov r0, r5
+; LE-I32-NEXT: mov r1, r4
+; LE-I32-NEXT: mov r2, r7
+; LE-I32-NEXT: mov r3, r6
+; LE-I32-NEXT: ldr r8, [sp, #160]
+; LE-I32-NEXT: ldr r9, [sp, #64]
+; LE-I32-NEXT: ldr r10, [sp, #80]
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #84
+; LE-I32-NEXT: vmov.32 d10[0], r0
+; LE-I32-NEXT: mov r0, r10
+; LE-I32-NEXT: ldm r3, {r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r6, [sp, #96]
+; LE-I32-NEXT: vmov.32 d11[0], r0
+; LE-I32-NEXT: ldr r1, [sp, #100]
+; LE-I32-NEXT: ldr r2, [sp, #104]
+; LE-I32-NEXT: ldr r3, [sp, #108]
+; LE-I32-NEXT: mov r0, r6
+; LE-I32-NEXT: ldr r4, [sp, #68]
+; LE-I32-NEXT: ldr r5, [sp, #72]
+; LE-I32-NEXT: ldr r10, [sp, #164]
+; LE-I32-NEXT: ldr r7, [sp, #168]
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r3, [sp, #76]
+; LE-I32-NEXT: vmov.32 d11[1], r0
+; LE-I32-NEXT: mov r0, r9
+; LE-I32-NEXT: mov r1, r4
+; LE-I32-NEXT: mov r2, r5
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r3, [sp, #172]
+; LE-I32-NEXT: vmov.32 d10[1], r0
+; LE-I32-NEXT: mov r0, r8
+; LE-I32-NEXT: mov r1, r10
+; LE-I32-NEXT: mov r2, r7
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #144
+; LE-I32-NEXT: mov r4, r0
+; LE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #132
+; LE-I32-NEXT: ldr r7, [sp, #128]
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: ldm r3, {r1, r2, r3}
+; LE-I32-NEXT: mov r0, r7
+; LE-I32-NEXT: vmov.32 d9[1], r4
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: vmov.32 d8[1], r0
+; LE-I32-NEXT: vorr q0, q5, q5
+; LE-I32-NEXT: vorr q1, q4, q4
+; LE-I32-NEXT: vpop {d8, d9, d10, d11}
+; LE-I32-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, pc}
+;
+; LE-I64-LABEL: lrint_v8fp128:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I64-NEXT: .pad #4
+; LE-I64-NEXT: sub sp, sp, #4
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: .pad #8
+; LE-I64-NEXT: sub sp, sp, #8
+; LE-I64-NEXT: mov r11, r3
+; LE-I64-NEXT: add r3, sp, #208
+; LE-I64-NEXT: mov r10, r2
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: mov r5, r0
+; LE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r7, sp, #164
+; LE-I64-NEXT: ldr r6, [sp, #160]
+; LE-I64-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: ldm r7, {r1, r2, r3, r7}
+; LE-I64-NEXT: mov r0, r6
+; LE-I64-NEXT: ldr r8, [sp, #128]
+; LE-I64-NEXT: ldr r9, [sp, #144]
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #180
+; LE-I64-NEXT: str r1, [sp] @ 4-byte Spill
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: mov r0, r7
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #132
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: mov r0, r8
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #148
+; LE-I64-NEXT: mov r8, r1
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: mov r0, r9
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: mov r0, r5
+; LE-I64-NEXT: mov r1, r4
+; LE-I64-NEXT: mov r2, r10
+; LE-I64-NEXT: mov r3, r11
+; LE-I64-NEXT: ldr r6, [sp, #112]
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #116
+; LE-I64-NEXT: mov r4, r1
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: mov r0, r6
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #196
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #192]
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: ldr r0, [sp] @ 4-byte Reload
+; LE-I64-NEXT: vmov.32 d11[1], r7
+; LE-I64-NEXT: vmov.32 d10[1], r0
+; LE-I64-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; LE-I64-NEXT: vmov.32 d15[1], r5
+; LE-I64-NEXT: vorr q2, q5, q5
+; LE-I64-NEXT: vmov.32 d13[1], r9
+; LE-I64-NEXT: vmov.32 d9[1], r0
+; LE-I64-NEXT: vmov.32 d14[1], r4
+; LE-I64-NEXT: vmov.32 d12[1], r8
+; LE-I64-NEXT: vorr q0, q7, q7
+; LE-I64-NEXT: vmov.32 d8[1], r1
+; LE-I64-NEXT: vorr q1, q6, q6
+; LE-I64-NEXT: vorr q3, q4, q4
+; LE-I64-NEXT: add sp, sp, #8
+; LE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: add sp, sp, #4
+; LE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-I32-LABEL: lrint_v8fp128:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I32-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I32-NEXT: .pad #4
+; BE-I32-NEXT: sub sp, sp, #4
+; BE-I32-NEXT: .vsave {d8, d9, d10, d11}
+; BE-I32-NEXT: vpush {d8, d9, d10, d11}
+; BE-I32-NEXT: .pad #8
+; BE-I32-NEXT: sub sp, sp, #8
+; BE-I32-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; BE-I32-NEXT: add r3, sp, #128
+; BE-I32-NEXT: mov r11, r2
+; BE-I32-NEXT: mov r6, r1
+; BE-I32-NEXT: mov r7, r0
+; BE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #100
+; BE-I32-NEXT: ldr r5, [sp, #96]
+; BE-I32-NEXT: vmov.32 d8[0], r0
+; BE-I32-NEXT: ldr r4, [sp, #160]
+; BE-I32-NEXT: ldm r3, {r1, r2, r3}
+; BE-I32-NEXT: mov r0, r5
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #164
+; BE-I32-NEXT: vmov.32 d11[0], r0
+; BE-I32-NEXT: mov r0, r4
+; BE-I32-NEXT: ldm r3, {r1, r2, r3}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r4, [sp, #176]
+; BE-I32-NEXT: vmov.32 d9[0], r0
+; BE-I32-NEXT: ldr r1, [sp, #180]
+; BE-I32-NEXT: ldr r2, [sp, #184]
+; BE-I32-NEXT: ldr r3, [sp, #188]
+; BE-I32-NEXT: mov r0, r4
+; BE-I32-NEXT: ldr r5, [sp, #116]
+; BE-I32-NEXT: ldr r8, [sp, #120]
+; BE-I32-NEXT: ldr r10, [sp, #84]
+; BE-I32-NEXT: ldr r9, [sp, #88]
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: vmov.32 d9[1], r0
+; BE-I32-NEXT: ldr r3, [sp, #124]
+; BE-I32-NEXT: ldr r0, [sp, #112]
+; BE-I32-NEXT: mov r1, r5
+; BE-I32-NEXT: mov r2, r8
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: vmov.32 d11[1], r0
+; BE-I32-NEXT: ldr r3, [sp, #92]
+; BE-I32-NEXT: ldr r0, [sp, #80]
+; BE-I32-NEXT: mov r1, r10
+; BE-I32-NEXT: mov r2, r9
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r3, [sp, #4] @ 4-byte Reload
+; BE-I32-NEXT: mov r4, r0
+; BE-I32-NEXT: mov r0, r7
+; BE-I32-NEXT: mov r1, r6
+; BE-I32-NEXT: mov r2, r11
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #148
+; BE-I32-NEXT: ldr r7, [sp, #144]
+; BE-I32-NEXT: vmov.32 d10[0], r0
+; BE-I32-NEXT: ldm r3, {r1, r2, r3}
+; BE-I32-NEXT: mov r0, r7
+; BE-I32-NEXT: vmov.32 d10[1], r4
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: vmov.32 d8[1], r0
+; BE-I32-NEXT: vrev64.32 q0, q5
+; BE-I32-NEXT: vrev64.32 q1, q4
+; BE-I32-NEXT: add sp, sp, #8
+; BE-I32-NEXT: vpop {d8, d9, d10, d11}
+; BE-I32-NEXT: add sp, sp, #4
+; BE-I32-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-I64-LABEL: lrint_v8fp128:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I64-NEXT: .pad #4
+; BE-I64-NEXT: sub sp, sp, #4
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14}
+; BE-I64-NEXT: .pad #16
+; BE-I64-NEXT: sub sp, sp, #16
+; BE-I64-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; BE-I64-NEXT: add r3, sp, #208
+; BE-I64-NEXT: mov r11, r2
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: mov r5, r0
+; BE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: ldr r7, [sp, #176]
+; BE-I64-NEXT: add r3, sp, #180
+; BE-I64-NEXT: str r1, [sp, #12] @ 4-byte Spill
+; BE-I64-NEXT: vmov.32 d8[0], r0
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: mov r0, r7
+; BE-I64-NEXT: ldr r6, [sp, #128]
+; BE-I64-NEXT: ldr r8, [sp, #144]
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #132
+; BE-I64-NEXT: str r1, [sp, #8] @ 4-byte Spill
+; BE-I64-NEXT: vmov.32 d9[0], r0
+; BE-I64-NEXT: mov r0, r6
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #148
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: mov r0, r8
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #160
+; BE-I64-NEXT: mov r9, r0
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: ldr r3, [sp, #4] @ 4-byte Reload
+; BE-I64-NEXT: mov r8, r1
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: mov r0, r5
+; BE-I64-NEXT: mov r1, r4
+; BE-I64-NEXT: mov r2, r11
+; BE-I64-NEXT: ldr r10, [sp, #112]
+; BE-I64-NEXT: vmov.32 d12[0], r9
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #116
+; BE-I64-NEXT: mov r4, r1
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: mov r0, r10
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #196
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #192]
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: vmov.32 d16[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
+; BE-I64-NEXT: vmov.32 d14[1], r5
+; BE-I64-NEXT: vmov.32 d9[1], r0
+; BE-I64-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
+; BE-I64-NEXT: vmov.32 d12[1], r7
+; BE-I64-NEXT: vmov.32 d8[1], r0
+; BE-I64-NEXT: vmov.32 d13[1], r4
+; BE-I64-NEXT: vmov.32 d10[1], r6
+; BE-I64-NEXT: vmov.32 d11[1], r8
+; BE-I64-NEXT: vmov.32 d16[1], r1
+; BE-I64-NEXT: vrev64.32 d1, d14
+; BE-I64-NEXT: vrev64.32 d3, d12
+; BE-I64-NEXT: vrev64.32 d5, d9
+; BE-I64-NEXT: vrev64.32 d7, d8
+; BE-I64-NEXT: vrev64.32 d0, d13
+; BE-I64-NEXT: vrev64.32 d2, d10
+; BE-I64-NEXT: vrev64.32 d4, d11
+; BE-I64-NEXT: vrev64.32 d6, d16
+; BE-I64-NEXT: add sp, sp, #16
+; BE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14}
+; BE-I64-NEXT: add sp, sp, #4
+; BE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128> %x)
+ ret <8 x iXLen> %a
+}
+declare <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128>)
+
+define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) {
+; LE-I32-LABEL: lrint_v16fp128:
+; LE-I32: @ %bb.0:
+; LE-I32-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I32-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I32-NEXT: .pad #4
+; LE-I32-NEXT: sub sp, sp, #4
+; LE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: mov r8, r3
+; LE-I32-NEXT: add r3, sp, #280
+; LE-I32-NEXT: mov r9, r2
+; LE-I32-NEXT: mov r10, r1
+; LE-I32-NEXT: mov r6, r0
+; LE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r4, [sp, #216]
+; LE-I32-NEXT: vmov.32 d8[0], r0
+; LE-I32-NEXT: ldr r1, [sp, #220]
+; LE-I32-NEXT: ldr r2, [sp, #224]
+; LE-I32-NEXT: ldr r3, [sp, #228]
+; LE-I32-NEXT: mov r0, r4
+; LE-I32-NEXT: ldr r7, [sp, #152]
+; LE-I32-NEXT: ldr r11, [sp, #104]
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #156
+; LE-I32-NEXT: vmov.32 d10[0], r0
+; LE-I32-NEXT: mov r0, r7
+; LE-I32-NEXT: ldm r3, {r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r7, [sp, #184]
+; LE-I32-NEXT: vmov.32 d12[0], r0
+; LE-I32-NEXT: ldr r1, [sp, #188]
+; LE-I32-NEXT: ldr r2, [sp, #192]
+; LE-I32-NEXT: ldr r3, [sp, #196]
+; LE-I32-NEXT: mov r0, r7
+; LE-I32-NEXT: ldr r4, [sp, #120]
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #124
+; LE-I32-NEXT: vmov.32 d13[0], r0
+; LE-I32-NEXT: mov r0, r4
+; LE-I32-NEXT: ldm r3, {r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r5, [sp, #136]
+; LE-I32-NEXT: vmov.32 d15[0], r0
+; LE-I32-NEXT: ldr r1, [sp, #140]
+; LE-I32-NEXT: ldr r2, [sp, #144]
+; LE-I32-NEXT: ldr r3, [sp, #148]
+; LE-I32-NEXT: mov r0, r5
+; LE-I32-NEXT: ldr r4, [sp, #108]
+; LE-I32-NEXT: ldr r7, [sp, #112]
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r3, [sp, #116]
+; LE-I32-NEXT: vmov.32 d15[1], r0
+; LE-I32-NEXT: mov r0, r11
+; LE-I32-NEXT: mov r1, r4
+; LE-I32-NEXT: mov r2, r7
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: mov r4, r0
+; LE-I32-NEXT: mov r0, r6
+; LE-I32-NEXT: mov r1, r10
+; LE-I32-NEXT: mov r2, r9
+; LE-I32-NEXT: mov r3, r8
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r7, [sp, #200]
+; LE-I32-NEXT: vmov.32 d14[0], r0
+; LE-I32-NEXT: ldr r1, [sp, #204]
+; LE-I32-NEXT: ldr r2, [sp, #208]
+; LE-I32-NEXT: ldr r3, [sp, #212]
+; LE-I32-NEXT: mov r0, r7
+; LE-I32-NEXT: ldr r5, [sp, #172]
+; LE-I32-NEXT: vmov.32 d14[1], r4
+; LE-I32-NEXT: ldr r6, [sp, #176]
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: vmov.32 d13[1], r0
+; LE-I32-NEXT: ldr r3, [sp, #180]
+; LE-I32-NEXT: ldr r0, [sp, #168]
+; LE-I32-NEXT: mov r1, r5
+; LE-I32-NEXT: mov r2, r6
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #248
+; LE-I32-NEXT: mov r5, r0
+; LE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: ldr r4, [sp, #264]
+; LE-I32-NEXT: vmov.32 d11[0], r0
+; LE-I32-NEXT: ldr r1, [sp, #268]
+; LE-I32-NEXT: ldr r2, [sp, #272]
+; LE-I32-NEXT: vmov.32 d12[1], r5
+; LE-I32-NEXT: ldr r3, [sp, #276]
+; LE-I32-NEXT: mov r0, r4
+; LE-I32-NEXT: ldr r6, [sp, #236]
+; LE-I32-NEXT: ldr r7, [sp, #240]
+; LE-I32-NEXT: ldr r8, [sp, #332]
+; LE-I32-NEXT: ldr r5, [sp, #336]
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: vmov.32 d11[1], r0
+; LE-I32-NEXT: ldr r3, [sp, #244]
+; LE-I32-NEXT: ldr r0, [sp, #232]
+; LE-I32-NEXT: mov r1, r6
+; LE-I32-NEXT: mov r2, r7
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: vmov.32 d10[1], r0
+; LE-I32-NEXT: ldr r3, [sp, #340]
+; LE-I32-NEXT: ldr r0, [sp, #328]
+; LE-I32-NEXT: mov r1, r8
+; LE-I32-NEXT: mov r2, r5
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #312
+; LE-I32-NEXT: mov r4, r0
+; LE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: add r3, sp, #300
+; LE-I32-NEXT: ldr r7, [sp, #296]
+; LE-I32-NEXT: vmov.32 d9[0], r0
+; LE-I32-NEXT: ldm r3, {r1, r2, r3}
+; LE-I32-NEXT: mov r0, r7
+; LE-I32-NEXT: vmov.32 d9[1], r4
+; LE-I32-NEXT: bl lrintl
+; LE-I32-NEXT: vmov.32 d8[1], r0
+; LE-I32-NEXT: vorr q0, q7, q7
+; LE-I32-NEXT: vorr q1, q6, q6
+; LE-I32-NEXT: vorr q2, q5, q5
+; LE-I32-NEXT: vorr q3, q4, q4
+; LE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I32-NEXT: add sp, sp, #4
+; LE-I32-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; LE-I64-LABEL: lrint_v16fp128:
+; LE-I64: @ %bb.0:
+; LE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; LE-I64-NEXT: .pad #4
+; LE-I64-NEXT: sub sp, sp, #4
+; LE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: .pad #72
+; LE-I64-NEXT: sub sp, sp, #72
+; LE-I64-NEXT: mov r6, r3
+; LE-I64-NEXT: add r3, sp, #408
+; LE-I64-NEXT: mov r7, r2
+; LE-I64-NEXT: mov r4, r0
+; LE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r5, sp, #176
+; LE-I64-NEXT: mov r10, r1
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: mov r0, r7
+; LE-I64-NEXT: ldm r5, {r2, r3, r5}
+; LE-I64-NEXT: mov r1, r6
+; LE-I64-NEXT: ldr r8, [sp, #232]
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #188
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: mov r0, r5
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #236
+; LE-I64-NEXT: mov r11, r1
+; LE-I64-NEXT: vmov.32 d9[0], r0
+; LE-I64-NEXT: mov r0, r8
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #252
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #248]
+; LE-I64-NEXT: mov r8, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #268
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #264]
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #284
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #280]
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #316
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #312]
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: vmov.32 d15[1], r5
+; LE-I64-NEXT: add lr, sp, #56
+; LE-I64-NEXT: ldr r5, [sp, #300]
+; LE-I64-NEXT: vmov.32 d14[1], r7
+; LE-I64-NEXT: ldr r2, [sp, #304]
+; LE-I64-NEXT: ldr r3, [sp, #308]
+; LE-I64-NEXT: vmov.32 d11[1], r6
+; LE-I64-NEXT: ldr r6, [sp, #200]
+; LE-I64-NEXT: ldr r7, [sp, #204]
+; LE-I64-NEXT: vmov.32 d10[1], r8
+; LE-I64-NEXT: ldr r8, [sp, #344]
+; LE-I64-NEXT: vmov.32 d9[1], r11
+; LE-I64-NEXT: ldr r11, [sp, #216]
+; LE-I64-NEXT: vstmia lr, {d14, d15} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #40
+; LE-I64-NEXT: vmov.32 d17[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #296]
+; LE-I64-NEXT: vmov.32 d8[1], r9
+; LE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: vorr q5, q8, q8
+; LE-I64-NEXT: vstmia lr, {d8, d9} @ 16-byte Spill
+; LE-I64-NEXT: vorr q4, q6, q6
+; LE-I64-NEXT: vmov.32 d11[1], r1
+; LE-I64-NEXT: mov r1, r5
+; LE-I64-NEXT: vmov.32 d9[1], r10
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: ldr r2, [sp, #208]
+; LE-I64-NEXT: ldr r3, [sp, #212]
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: mov r9, r1
+; LE-I64-NEXT: mov r0, r6
+; LE-I64-NEXT: mov r1, r7
+; LE-I64-NEXT: vstmia lr, {d10, d11} @ 16-byte Spill
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #220
+; LE-I64-NEXT: mov r10, r1
+; LE-I64-NEXT: vmov.32 d10[0], r0
+; LE-I64-NEXT: mov r0, r11
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #348
+; LE-I64-NEXT: mov r11, r1
+; LE-I64-NEXT: vmov.32 d11[0], r0
+; LE-I64-NEXT: mov r0, r8
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #364
+; LE-I64-NEXT: vmov.32 d13[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #360]
+; LE-I64-NEXT: mov r8, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #380
+; LE-I64-NEXT: vmov.32 d14[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #376]
+; LE-I64-NEXT: mov r5, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #396
+; LE-I64-NEXT: vmov.32 d15[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #392]
+; LE-I64-NEXT: mov r6, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add r3, sp, #332
+; LE-I64-NEXT: vmov.32 d8[0], r0
+; LE-I64-NEXT: ldr r0, [sp, #328]
+; LE-I64-NEXT: mov r7, r1
+; LE-I64-NEXT: ldm r3, {r1, r2, r3}
+; LE-I64-NEXT: bl lrintl
+; LE-I64-NEXT: add lr, sp, #8
+; LE-I64-NEXT: vmov.32 d12[0], r0
+; LE-I64-NEXT: add r0, r4, #64
+; LE-I64-NEXT: vldmia lr, {d18, d19} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #24
+; LE-I64-NEXT: vmov.32 d13[1], r8
+; LE-I64-NEXT: vmov.32 d18[1], r9
+; LE-I64-NEXT: vmov.32 d15[1], r6
+; LE-I64-NEXT: vmov.32 d12[1], r1
+; LE-I64-NEXT: vmov.32 d14[1], r5
+; LE-I64-NEXT: vst1.64 {d18, d19}, [r0:128]!
+; LE-I64-NEXT: vst1.64 {d12, d13}, [r0:128]!
+; LE-I64-NEXT: vmov.32 d8[1], r7
+; LE-I64-NEXT: vst1.64 {d14, d15}, [r0:128]!
+; LE-I64-NEXT: vst1.64 {d8, d9}, [r0:128]
+; LE-I64-NEXT: vmov.32 d11[1], r11
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #40
+; LE-I64-NEXT: vmov.32 d10[1], r10
+; LE-I64-NEXT: vst1.64 {d16, d17}, [r4:128]!
+; LE-I64-NEXT: vst1.64 {d10, d11}, [r4:128]!
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: add lr, sp, #56
+; LE-I64-NEXT: vst1.64 {d16, d17}, [r4:128]!
+; LE-I64-NEXT: vldmia lr, {d16, d17} @ 16-byte Reload
+; LE-I64-NEXT: vst1.64 {d16, d17}, [r4:128]
+; LE-I64-NEXT: add sp, sp, #72
+; LE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; LE-I64-NEXT: add sp, sp, #4
+; LE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-I32-LABEL: lrint_v16fp128:
+; BE-I32: @ %bb.0:
+; BE-I32-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I32-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I32-NEXT: .pad #4
+; BE-I32-NEXT: sub sp, sp, #4
+; BE-I32-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: .pad #16
+; BE-I32-NEXT: sub sp, sp, #16
+; BE-I32-NEXT: stm sp, {r0, r1, r2, r3} @ 16-byte Folded Spill
+; BE-I32-NEXT: add r3, sp, #264
+; BE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #332
+; BE-I32-NEXT: ldr r7, [sp, #328]
+; BE-I32-NEXT: vmov.32 d9[0], r0
+; BE-I32-NEXT: ldr r10, [sp, #280]
+; BE-I32-NEXT: ldm r3, {r1, r2, r3}
+; BE-I32-NEXT: mov r0, r7
+; BE-I32-NEXT: ldr r8, [sp, #168]
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r5, [sp, #344]
+; BE-I32-NEXT: vmov.32 d11[0], r0
+; BE-I32-NEXT: ldr r1, [sp, #348]
+; BE-I32-NEXT: ldr r2, [sp, #352]
+; BE-I32-NEXT: ldr r3, [sp, #356]
+; BE-I32-NEXT: mov r0, r5
+; BE-I32-NEXT: ldr r7, [sp, #284]
+; BE-I32-NEXT: ldr r4, [sp, #288]
+; BE-I32-NEXT: ldr r6, [sp, #172]
+; BE-I32-NEXT: ldr r9, [sp, #176]
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r3, [sp, #292]
+; BE-I32-NEXT: vmov.32 d11[1], r0
+; BE-I32-NEXT: mov r0, r10
+; BE-I32-NEXT: mov r1, r7
+; BE-I32-NEXT: mov r2, r4
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r3, [sp, #180]
+; BE-I32-NEXT: vmov.32 d9[1], r0
+; BE-I32-NEXT: mov r0, r8
+; BE-I32-NEXT: mov r1, r6
+; BE-I32-NEXT: mov r2, r9
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #232
+; BE-I32-NEXT: mov r4, r0
+; BE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #136
+; BE-I32-NEXT: mov r6, r0
+; BE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r5, [sp, #296]
+; BE-I32-NEXT: vmov.32 d13[0], r0
+; BE-I32-NEXT: ldr r1, [sp, #300]
+; BE-I32-NEXT: ldr r2, [sp, #304]
+; BE-I32-NEXT: ldr r3, [sp, #308]
+; BE-I32-NEXT: mov r0, r5
+; BE-I32-NEXT: ldr r10, [sp, #216]
+; BE-I32-NEXT: ldr r8, [sp, #220]
+; BE-I32-NEXT: ldr r9, [sp, #152]
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r7, [sp, #248]
+; BE-I32-NEXT: vmov.32 d10[0], r0
+; BE-I32-NEXT: ldr r1, [sp, #252]
+; BE-I32-NEXT: ldr r2, [sp, #256]
+; BE-I32-NEXT: vmov.32 d8[0], r6
+; BE-I32-NEXT: ldr r3, [sp, #260]
+; BE-I32-NEXT: mov r0, r7
+; BE-I32-NEXT: ldr r5, [sp, #224]
+; BE-I32-NEXT: ldr r11, [sp, #120]
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r3, [sp, #228]
+; BE-I32-NEXT: vmov.32 d8[1], r0
+; BE-I32-NEXT: mov r0, r10
+; BE-I32-NEXT: mov r1, r8
+; BE-I32-NEXT: mov r2, r5
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #200
+; BE-I32-NEXT: mov r5, r0
+; BE-I32-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: vmov.32 d15[0], r0
+; BE-I32-NEXT: ldr r0, [sp, #184]
+; BE-I32-NEXT: ldr r1, [sp, #188]
+; BE-I32-NEXT: ldr r2, [sp, #192]
+; BE-I32-NEXT: vmov.32 d14[0], r4
+; BE-I32-NEXT: ldr r3, [sp, #196]
+; BE-I32-NEXT: vmov.32 d15[1], r5
+; BE-I32-NEXT: ldr r7, [sp, #156]
+; BE-I32-NEXT: ldr r6, [sp, #160]
+; BE-I32-NEXT: ldr r4, [sp, #124]
+; BE-I32-NEXT: ldr r5, [sp, #128]
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r3, [sp, #164]
+; BE-I32-NEXT: vmov.32 d14[1], r0
+; BE-I32-NEXT: mov r0, r9
+; BE-I32-NEXT: mov r1, r7
+; BE-I32-NEXT: mov r2, r6
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: ldr r3, [sp, #132]
+; BE-I32-NEXT: vmov.32 d13[1], r0
+; BE-I32-NEXT: mov r0, r11
+; BE-I32-NEXT: mov r1, r4
+; BE-I32-NEXT: mov r2, r5
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: mov r4, r0
+; BE-I32-NEXT: ldm sp, {r0, r1, r2, r3} @ 16-byte Folded Reload
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: add r3, sp, #316
+; BE-I32-NEXT: ldr r7, [sp, #312]
+; BE-I32-NEXT: vmov.32 d12[0], r0
+; BE-I32-NEXT: ldm r3, {r1, r2, r3}
+; BE-I32-NEXT: mov r0, r7
+; BE-I32-NEXT: vmov.32 d12[1], r4
+; BE-I32-NEXT: bl lrintl
+; BE-I32-NEXT: vmov.32 d10[1], r0
+; BE-I32-NEXT: vrev64.32 q0, q6
+; BE-I32-NEXT: vrev64.32 q1, q7
+; BE-I32-NEXT: vrev64.32 q2, q4
+; BE-I32-NEXT: vrev64.32 q3, q5
+; BE-I32-NEXT: add sp, sp, #16
+; BE-I32-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I32-NEXT: add sp, sp, #4
+; BE-I32-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; BE-I64-LABEL: lrint_v16fp128:
+; BE-I64: @ %bb.0:
+; BE-I64-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I64-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; BE-I64-NEXT: .pad #4
+; BE-I64-NEXT: sub sp, sp, #4
+; BE-I64-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: .pad #56
+; BE-I64-NEXT: sub sp, sp, #56
+; BE-I64-NEXT: mov r5, r3
+; BE-I64-NEXT: add r3, sp, #376
+; BE-I64-NEXT: mov r6, r2
+; BE-I64-NEXT: mov r4, r0
+; BE-I64-NEXT: ldm r3, {r0, r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: ldr r7, [sp, #392]
+; BE-I64-NEXT: add r3, sp, #396
+; BE-I64-NEXT: mov r9, r1
+; BE-I64-NEXT: vmov.32 d8[0], r0
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: mov r0, r7
+; BE-I64-NEXT: ldr r11, [sp, #168]
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: ldr r2, [sp, #160]
+; BE-I64-NEXT: mov r10, r1
+; BE-I64-NEXT: ldr r3, [sp, #164]
+; BE-I64-NEXT: vmov.32 d9[0], r0
+; BE-I64-NEXT: mov r0, r6
+; BE-I64-NEXT: mov r1, r5
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #172
+; BE-I64-NEXT: mov r8, r1
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: mov r0, r11
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #220
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #216]
+; BE-I64-NEXT: mov r11, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #236
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #232]
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #252
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #248]
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #268
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #264]
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: vmov.32 d15[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #280]
+; BE-I64-NEXT: ldr r2, [sp, #288]
+; BE-I64-NEXT: vmov.32 d13[1], r7
+; BE-I64-NEXT: ldr r7, [sp, #284]
+; BE-I64-NEXT: ldr r3, [sp, #292]
+; BE-I64-NEXT: vmov.32 d14[1], r5
+; BE-I64-NEXT: ldr r5, [sp, #328]
+; BE-I64-NEXT: vmov.32 d12[1], r6
+; BE-I64-NEXT: ldr r6, [sp, #300]
+; BE-I64-NEXT: vmov.32 d10[1], r8
+; BE-I64-NEXT: ldr r8, [sp, #184]
+; BE-I64-NEXT: vmov.32 d11[1], r11
+; BE-I64-NEXT: vmov.32 d9[1], r10
+; BE-I64-NEXT: vmov.32 d8[1], r9
+; BE-I64-NEXT: vmov.32 d15[1], r1
+; BE-I64-NEXT: mov r1, r7
+; BE-I64-NEXT: vstr d14, [sp, #48] @ 8-byte Spill
+; BE-I64-NEXT: vstr d13, [sp, #40] @ 8-byte Spill
+; BE-I64-NEXT: vstr d12, [sp, #32] @ 8-byte Spill
+; BE-I64-NEXT: vstr d11, [sp, #24] @ 8-byte Spill
+; BE-I64-NEXT: vstr d10, [sp, #16] @ 8-byte Spill
+; BE-I64-NEXT: vstr d9, [sp, #8] @ 8-byte Spill
+; BE-I64-NEXT: vstr d8, [sp] @ 8-byte Spill
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: mov r10, r1
+; BE-I64-NEXT: ldr r1, [sp, #296]
+; BE-I64-NEXT: ldr r2, [sp, #304]
+; BE-I64-NEXT: vmov.32 d8[0], r0
+; BE-I64-NEXT: ldr r3, [sp, #308]
+; BE-I64-NEXT: mov r0, r1
+; BE-I64-NEXT: mov r1, r6
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #332
+; BE-I64-NEXT: mov r11, r1
+; BE-I64-NEXT: vmov.32 d9[0], r0
+; BE-I64-NEXT: mov r0, r5
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #188
+; BE-I64-NEXT: mov r7, r1
+; BE-I64-NEXT: vmov.32 d10[0], r0
+; BE-I64-NEXT: mov r0, r8
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #204
+; BE-I64-NEXT: vmov.32 d11[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #200]
+; BE-I64-NEXT: mov r8, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #348
+; BE-I64-NEXT: vmov.32 d12[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #344]
+; BE-I64-NEXT: mov r5, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #364
+; BE-I64-NEXT: vmov.32 d13[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #360]
+; BE-I64-NEXT: mov r9, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: add r3, sp, #316
+; BE-I64-NEXT: vmov.32 d14[0], r0
+; BE-I64-NEXT: ldr r0, [sp, #312]
+; BE-I64-NEXT: mov r6, r1
+; BE-I64-NEXT: ldm r3, {r1, r2, r3}
+; BE-I64-NEXT: bl lrintl
+; BE-I64-NEXT: vldr d18, [sp, #48] @ 8-byte Reload
+; BE-I64-NEXT: vrev64.32 d17, d15
+; BE-I64-NEXT: vrev64.32 d16, d18
+; BE-I64-NEXT: vldr d18, [sp, #40] @ 8-byte Reload
+; BE-I64-NEXT: vmov.32 d24[0], r0
+; BE-I64-NEXT: add r0, r4, #64
+; BE-I64-NEXT: vldr d20, [sp, #32] @ 8-byte Reload
+; BE-I64-NEXT: vrev64.32 d19, d18
+; BE-I64-NEXT: vmov.32 d9[1], r11
+; BE-I64-NEXT: vmov.32 d10[1], r7
+; BE-I64-NEXT: vrev64.32 d18, d20
+; BE-I64-NEXT: vldr d20, [sp, #24] @ 8-byte Reload
+; BE-I64-NEXT: vmov.32 d8[1], r10
+; BE-I64-NEXT: vmov.32 d14[1], r6
+; BE-I64-NEXT: vmov.32 d24[1], r1
+; BE-I64-NEXT: vldr d22, [sp, #16] @ 8-byte Reload
+; BE-I64-NEXT: vrev64.32 d21, d20
+; BE-I64-NEXT: vrev64.32 d1, d9
+; BE-I64-NEXT: vmov.32 d13[1], r9
+; BE-I64-NEXT: vrev64.32 d31, d10
+; BE-I64-NEXT: vrev64.32 d20, d22
+; BE-I64-NEXT: vldr d22, [sp, #8] @ 8-byte Reload
+; BE-I64-NEXT: vrev64.32 d0, d8
+; BE-I64-NEXT: vrev64.32 d29, d14
+; BE-I64-NEXT: vmov.32 d12[1], r5
+; BE-I64-NEXT: vrev64.32 d30, d24
+; BE-I64-NEXT: vrev64.32 d27, d22
+; BE-I64-NEXT: vldr d22, [sp] @ 8-byte Reload
+; BE-I64-NEXT: vst1.64 {d0, d1}, [r0:128]!
+; BE-I64-NEXT: vmov.32 d11[1], r8
+; BE-I64-NEXT: vrev64.32 d28, d13
+; BE-I64-NEXT: vst1.64 {d30, d31}, [r0:128]!
+; BE-I64-NEXT: vrev64.32 d26, d22
+; BE-I64-NEXT: vrev64.32 d23, d12
+; BE-I64-NEXT: vst1.64 {d28, d29}, [r0:128]!
+; BE-I64-NEXT: vrev64.32 d22, d11
+; BE-I64-NEXT: vst1.64 {d26, d27}, [r0:128]
+; BE-I64-NEXT: vst1.64 {d20, d21}, [r4:128]!
+; BE-I64-NEXT: vst1.64 {d22, d23}, [r4:128]!
+; BE-I64-NEXT: vst1.64 {d18, d19}, [r4:128]!
+; BE-I64-NEXT: vst1.64 {d16, d17}, [r4:128]
+; BE-I64-NEXT: add sp, sp, #56
+; BE-I64-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; BE-I64-NEXT: add sp, sp, #4
+; BE-I64-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+ %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128> %x)
+ ret <16 x iXLen> %a
+}
+declare <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128>)
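
All of the fp128 cases above follow one pattern: ARM has no hardware fp128 support, so each vector lane is scalarized into a libcall to lrintl, with the fp128 argument passed in r0-r3 (and on the stack for later lanes) and the result moved into a NEON lane afterwards. A minimal standalone reduction of that pattern is sketched below; it is illustrative only, not part of the patch, and assumes a hard-float ARMv7 triple and an i64 instantiation of the iXLen placeholder:

; RUN: llc -mtriple=armv7-unknown-none-eabihf %s -o - | FileCheck %s
define <1 x i64> @one_lane_sketch(<1 x fp128> %x) {
; CHECK-LABEL: one_lane_sketch:
; CHECK: bl lrintl
  ; the single fp128 lane becomes one call to the lrintl libcall
  %a = call <1 x i64> @llvm.lrint.v1i64.v1fp128(<1 x fp128> %x)
  ret <1 x i64> %a
}
declare <1 x i64> @llvm.lrint.v1i64.v1fp128(<1 x fp128>)
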
diff --git a/llvm/test/CodeGen/ARM/vtrn.ll b/llvm/test/CodeGen/ARM/vtrn.ll
index 136fec3..6377469 100644
--- a/llvm/test/CodeGen/ARM/vtrn.ll
+++ b/llvm/test/CodeGen/ARM/vtrn.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
define <8 x i8> @vtrni8(ptr %A, ptr %B) nounwind {
@@ -20,11 +21,11 @@ define <8 x i8> @vtrni8(ptr %A, ptr %B) nounwind {
define <16 x i8> @vtrni8_Qres(ptr %A, ptr %B) nounwind {
; CHECK-LABEL: vtrni8_Qres:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT: vtrn.8 [[LDR0]], [[LDR1]]
-; CHECK-NEXT: vmov r0, r1, [[LDR0]]
-; CHECK-NEXT: vmov r2, r3, [[LDR1]]
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vtrn.8 d17, d16
+; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: vmov r2, r3, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, ptr %A
%tmp2 = load <8 x i8>, ptr %B
@@ -52,11 +53,11 @@ define <4 x i16> @vtrni16(ptr %A, ptr %B) nounwind {
define <8 x i16> @vtrni16_Qres(ptr %A, ptr %B) nounwind {
; CHECK-LABEL: vtrni16_Qres:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT: vtrn.16 [[LDR0]], [[LDR1]]
-; CHECK-NEXT: vmov r0, r1, [[LDR0]]
-; CHECK-NEXT: vmov r2, r3, [[LDR1]]
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vtrn.16 d17, d16
+; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: vmov r2, r3, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
@@ -84,11 +85,11 @@ define <2 x i32> @vtrni32(ptr %A, ptr %B) nounwind {
define <4 x i32> @vtrni32_Qres(ptr %A, ptr %B) nounwind {
; CHECK-LABEL: vtrni32_Qres:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT: vtrn.32 [[LDR0]], [[LDR1]]
-; CHECK-NEXT: vmov r0, r1, [[LDR0]]
-; CHECK-NEXT: vmov r2, r3, [[LDR1]]
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vtrn.32 d17, d16
+; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: vmov r2, r3, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, ptr %A
%tmp2 = load <2 x i32>, ptr %B
@@ -116,11 +117,11 @@ define <2 x float> @vtrnf(ptr %A, ptr %B) nounwind {
define <4 x float> @vtrnf_Qres(ptr %A, ptr %B) nounwind {
; CHECK-LABEL: vtrnf_Qres:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT: vtrn.32 [[LDR0]], [[LDR1]]
-; CHECK-NEXT: vmov r0, r1, [[LDR0]]
-; CHECK-NEXT: vmov r2, r3, [[LDR1]]
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vtrn.32 d17, d16
+; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: vmov r2, r3, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x float>, ptr %A
%tmp2 = load <2 x float>, ptr %B
@@ -281,11 +282,11 @@ define <8 x i8> @vtrni8_undef(ptr %A, ptr %B) nounwind {
define <16 x i8> @vtrni8_undef_Qres(ptr %A, ptr %B) nounwind {
; CHECK-LABEL: vtrni8_undef_Qres:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT: vtrn.8 [[LDR0]], [[LDR1]]
-; CHECK-NEXT: vmov r0, r1, [[LDR0]]
-; CHECK-NEXT: vmov r2, r3, [[LDR1]]
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vtrn.8 d17, d16
+; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: vmov r2, r3, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, ptr %A
%tmp2 = load <8 x i8>, ptr %B
@@ -327,9 +328,15 @@ define <16 x i16> @vtrnQi16_undef_QQres(ptr %A, ptr %B) nounwind {
}
define <8 x i16> @vtrn_lower_shufflemask_undef(ptr %A, ptr %B) {
+; CHECK-LABEL: vtrn_lower_shufflemask_undef:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vtrn.16 d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d16
+; CHECK-NEXT: mov pc, lr
entry:
- ; CHECK-LABEL: vtrn_lower_shufflemask_undef
- ; CHECK: vtrn
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
%0 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 5, i32 3, i32 7>
@@ -340,12 +347,26 @@ entry:
; values do modify the type. However, we get different input types, as some of
; them get truncated from i32 to i8 (from comparing cmp0 with cmp1) and some of
; them get truncated from i16 to i8 (from comparing cmp2 with cmp3).
-define <8 x i8> @vtrn_mismatched_builvector0(<8 x i8> %tr0, <8 x i8> %tr1,
- <4 x i32> %cmp0, <4 x i32> %cmp1,
- <4 x i16> %cmp2, <4 x i16> %cmp3) {
- ; CHECK-LABEL: vtrn_mismatched_builvector0:
- ; CHECK: vmovn.i32
- ; CHECK: vbsl
+define <8 x i8> @vtrn_mismatched_builvector0(<8 x i8> %tr0, <8 x i8> %tr1, <4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i16> %cmp2, <4 x i16> %cmp3) {
+; CHECK-LABEL: vtrn_mismatched_builvector0:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vldr d20, [sp, #32]
+; CHECK-NEXT: vldr d18, [sp, #40]
+; CHECK-NEXT: vcgt.u16 d18, d18, d20
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vtrn.8 d16, d18
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vshr.s8 d16, d16, #7
+; CHECK-NEXT: vbsl d16, d18, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%c0 = icmp ult <4 x i32> %cmp0, %cmp1
%c1 = icmp ult <4 x i16> %cmp2, %cmp3
%c = shufflevector <4 x i1> %c0, <4 x i1> %c1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
@@ -356,12 +377,30 @@ define <8 x i8> @vtrn_mismatched_builvector0(<8 x i8> %tr0, <8 x i8> %tr1,
; Here we get a build_vector node, where half the incoming extract_element
; values do not modify the type (the values from cmp2), but half of them do
; (from the icmp operation).
-define <8 x i8> @vtrn_mismatched_builvector1(<8 x i8> %tr0, <8 x i8> %tr1,
- <4 x i32> %cmp0, <4 x i32> %cmp1, ptr %cmp2_ptr) {
- ; CHECK-LABEL: vtrn_mismatched_builvector1:
- ; We need to extend the 4 x i8 to 4 x i16 in order to perform the vtrn
- ; CHECK: vmovl
- ; CHECK: vbsl
+; We need to extend the 4 x i8 to 4 x i16 in order to perform the vtrn
+define <8 x i8> @vtrn_mismatched_builvector1(<8 x i8> %tr0, <8 x i8> %tr1, <4 x i32> %cmp0, <4 x i32> %cmp1, ptr %cmp2_ptr) {
+; CHECK-LABEL: vtrn_mismatched_builvector1:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: add lr, sp, #24
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: ldr r12, [sp, #40]
+; CHECK-NEXT: vld1.64 {d18, d19}, [lr]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vld1.32 {d18[0]}, [r12:32]
+; CHECK-NEXT: vmovl.u8 q9, d18
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vtrn.8 d16, d18
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vshr.s8 d16, d16, #7
+; CHECK-NEXT: vbsl d16, d18, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: pop {r11, lr}
+; CHECK-NEXT: mov pc, lr
%cmp2_load = load <4 x i8>, ptr %cmp2_ptr, align 4
%cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
%c0 = icmp ult <4 x i32> %cmp0, %cmp1
@@ -373,15 +412,15 @@ define <8 x i8> @vtrn_mismatched_builvector1(<8 x i8> %tr0, <8 x i8> %tr1,
; The shuffle mask is half a vtrn; we duplicate the half to produce the
; full result.
define void @lower_twice_no_vtrn(ptr %A, ptr %B, ptr %C) {
+; CHECK-LABEL: lower_twice_no_vtrn:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d18, [r0]
+; CHECK-NEXT: vtrn.16 d18, d16
+; CHECK-NEXT: vorr d17, d16, d16
+; CHECK-NEXT: vst1.64 {d16, d17}, [r2]
+; CHECK-NEXT: mov pc, lr
entry:
- ; CHECK-LABEL: lower_twice_no_vtrn:
- ; CHECK: @ %bb.0:
- ; CHECK-NEXT: vldr d16, [r1]
- ; CHECK-NEXT: vldr d18, [r0]
- ; CHECK-NEXT: vtrn.16 d18, d16
- ; CHECK-NEXT: vorr d17, d16, d16
- ; CHECK-NEXT: vst1.64 {d16, d17}, [r2]
- ; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
%0 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 undef, i32 5, i32 3, i32 7, i32 1, i32 5, i32 3, i32 7>
@@ -392,18 +431,49 @@ entry:
; The shuffle mask is half a vtrn; we duplicate the half to produce the
; full result.
define void @upper_twice_no_vtrn(ptr %A, ptr %B, ptr %C) {
+; CHECK-LABEL: upper_twice_no_vtrn:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d18, [r0]
+; CHECK-NEXT: vtrn.16 d18, d16
+; CHECK-NEXT: vorr d19, d18, d18
+; CHECK-NEXT: vst1.64 {d18, d19}, [r2]
+; CHECK-NEXT: mov pc, lr
entry:
- ; CHECK-LABEL: upper_twice_no_vtrn:
- ; CHECK: @ %bb.0:
- ; CHECK-NEXT: vldr d16, [r1]
- ; CHECK-NEXT: vldr d18, [r0]
- ; CHECK-NEXT: vtrn.16 d18, d16
- ; CHECK-NEXT: vorr d19, d18, d18
- ; CHECK-NEXT: vst1.64 {d18, d19}, [r2]
- ; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, ptr %A
%tmp2 = load <4 x i16>, ptr %B
%0 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 6, i32 0, i32 4, i32 2, i32 6>
store <8 x i16> %0, ptr %C
ret void
}
+
+define void @test_15xi16(ptr %next.gep, ptr %next.gep13) {
+; CHECK-LABEL: test_15xi16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: add r2, r0, #2
+; CHECK-NEXT: add r3, r0, #6
+; CHECK-NEXT: vld1.16 {d16, d17}, [r2]!
+; CHECK-NEXT: vld1.16 {d18}, [r2]!
+; CHECK-NEXT: vld1.16 {d20, d21}, [r3]!
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: vld1.16 {d22}, [r3]!
+; CHECK-NEXT: vmov.16 d19[0], r2
+; CHECK-NEXT: ldr r3, [r3]
+; CHECK-NEXT: add r2, r0, #30
+; CHECK-NEXT: add r0, r0, #34
+; CHECK-NEXT: vmov.16 d19[1], r3
+; CHECK-NEXT: vld1.16 {d19[2]}, [r2:16]
+; CHECK-NEXT: vtrn.16 q8, q10
+; CHECK-NEXT: vld1.16 {d19[3]}, [r0:16]
+; CHECK-NEXT: vtrn.16 d18, d22
+; CHECK-NEXT: vst1.16 {d16, d17}, [r1]!
+; CHECK-NEXT: vst1.16 {d18, d19}, [r1]
+; CHECK-NEXT: mov pc, lr
+ %a = getelementptr inbounds nuw i8, ptr %next.gep, i32 2
+ %b = load <15 x i16>, ptr %a, align 2
+ %c = getelementptr inbounds nuw i8, ptr %next.gep, i32 6
+ %d = load <15 x i16>, ptr %c, align 2
+ %interleaved.vec = shufflevector <15 x i16> %b, <15 x i16> %d, <16 x i32> <i32 0, i32 15, i32 2, i32 17, i32 4, i32 19, i32 6, i32 21, i32 8, i32 23, i32 10, i32 25, i32 12, i32 27, i32 14, i32 29>
+ store <16 x i16> %interleaved.vec, ptr %next.gep13, align 2
+ ret void
+}
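
The regenerated checks above all exercise the same selection rule: a shufflevector whose mask interleaves the even-indexed (or odd-indexed) lanes of both inputs is matched to VTRN. A minimal sketch of the even-lane form, using the mask from vtrni16 and assuming this file's RUN line (the function name is illustrative, not part of the patch):

define <4 x i16> @vtrn_even_sketch(<4 x i16> %a, <4 x i16> %b) nounwind {
; CHECK: vtrn.16
  ; mask {0,4,2,6} selects {a[0],b[0],a[2],b[2]}, the first VTRN.16 result
  %t = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  ret <4 x i16> %t
}
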
diff --git a/llvm/test/CodeGen/ARM/vuzp.ll b/llvm/test/CodeGen/ARM/vuzp.ll
index 7e1dfba..d24dadc 100644
--- a/llvm/test/CodeGen/ARM/vuzp.ll
+++ b/llvm/test/CodeGen/ARM/vuzp.ll
@@ -535,3 +535,59 @@ define %struct.uint8x8x2_t @vuzp_extract_subvector(<16 x i8> %t) #0 {
%.fca.0.1.insert = insertvalue %struct.uint8x8x2_t %.fca.0.0.insert, <8 x i8> %vuzp1.i, 0, 1
ret %struct.uint8x8x2_t %.fca.0.1.insert
}
+
+define void @test_15xi16(ptr %next.gep, ptr %next.gep13) {
+; CHECK-LABEL: test_15xi16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: add r2, r0, #2
+; CHECK-NEXT: add r3, r0, #6
+; CHECK-NEXT: vld1.16 {d20, d21}, [r2]!
+; CHECK-NEXT: vld1.16 {d16}, [r2]!
+; CHECK-NEXT: vmov.u16 r12, d16[0]
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: vmov.u16 r4, d20[0]
+; CHECK-NEXT: vld1.16 {d22, d23}, [r3]!
+; CHECK-NEXT: vld1.16 {d24}, [r3]!
+; CHECK-NEXT: vmov.u16 lr, d16[2]
+; CHECK-NEXT: vmov.u16 r5, d22[0]
+; CHECK-NEXT: vmov.u16 r6, d21[0]
+; CHECK-NEXT: vmov.16 d17[0], r12
+; CHECK-NEXT: vmov.16 d16[0], r4
+; CHECK-NEXT: vmov.u16 r4, d24[0]
+; CHECK-NEXT: vmov.u16 r12, d24[2]
+; CHECK-NEXT: vmov.16 d17[1], lr
+; CHECK-NEXT: vmov.16 d18[0], r5
+; CHECK-NEXT: vmov.u16 r5, d20[2]
+; CHECK-NEXT: vmov.u16 lr, d23[0]
+; CHECK-NEXT: vmov.16 d19[0], r4
+; CHECK-NEXT: vmov.u16 r4, d22[2]
+; CHECK-NEXT: vmov.16 d16[1], r5
+; CHECK-NEXT: vmov.u16 r5, d21[2]
+; CHECK-NEXT: vmov.16 d17[2], r2
+; CHECK-NEXT: ldr r2, [r3]
+; CHECK-NEXT: vmov.16 d16[2], r6
+; CHECK-NEXT: vmov.16 d18[1], r4
+; CHECK-NEXT: vmov.u16 r4, d23[2]
+; CHECK-NEXT: vmov.16 d19[1], r12
+; CHECK-NEXT: vmov.16 d18[2], lr
+; CHECK-NEXT: vmov.16 d19[2], r2
+; CHECK-NEXT: add r2, r0, #30
+; CHECK-NEXT: add r0, r0, #34
+; CHECK-NEXT: vld1.16 {d17[3]}, [r2:16]
+; CHECK-NEXT: vmov.16 d16[3], r5
+; CHECK-NEXT: vmov.16 d18[3], r4
+; CHECK-NEXT: vld1.16 {d19[3]}, [r0:16]
+; CHECK-NEXT: vst1.16 {d16, d17}, [r1]!
+; CHECK-NEXT: vst1.16 {d18, d19}, [r1]
+; CHECK-NEXT: pop {r4, r5, r6, lr}
+; CHECK-NEXT: mov pc, lr
+ %a = getelementptr inbounds nuw i8, ptr %next.gep, i32 2
+ %b = load <15 x i16>, ptr %a, align 2
+ %c = getelementptr inbounds nuw i8, ptr %next.gep, i32 6
+ %d = load <15 x i16>, ptr %c, align 2
+ %interleaved.vec = shufflevector <15 x i16> %b, <15 x i16> %d, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29>
+ store <16 x i16> %interleaved.vec, ptr %next.gep13, align 2
+ ret void
+}
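
vuzp is the de-interleaving counterpart: its mask gathers all even-indexed (or all odd-indexed) elements of the two inputs concatenated. A minimal sketch using the mask from vuzpi8, under the same RUN-line assumption (illustrative, not part of the patch):

define <8 x i8> @vuzp_even_sketch(<8 x i8> %a, <8 x i8> %b) nounwind {
; CHECK: vuzp.8
  ; mask {0,2,4,6,8,10,12,14} takes the even elements of a:b, the first VUZP.8 result
  %u = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  ret <8 x i8> %u
}
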
diff --git a/llvm/test/CodeGen/ARM/vzip.ll b/llvm/test/CodeGen/ARM/vzip.ll
index dda774a..ce40a2e 100644
--- a/llvm/test/CodeGen/ARM/vzip.ll
+++ b/llvm/test/CodeGen/ARM/vzip.ll
@@ -381,3 +381,22 @@ entry:
%vzip.i = shufflevector <8 x i8> %lane, <8 x i8> %lane3, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
ret <8 x i8> %vzip.i
}
+
+define <16 x i16> @test_15xi16(ptr %next.gep, ptr %next.gep13) {
+; CHECK-LABEL: test_15xi16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: add r1, r1, #2
+; CHECK-NEXT: mov r2, #4
+; CHECK-NEXT: vld1.16 {d16, d17}, [r1], r2
+; CHECK-NEXT: vld1.16 {d18, d19}, [r1]
+; CHECK-NEXT: vzip.16 q8, q9
+; CHECK-NEXT: vst1.16 {d16, d17}, [r0:128]!
+; CHECK-NEXT: vst1.64 {d18, d19}, [r0:128]
+; CHECK-NEXT: mov pc, lr
+ %a = getelementptr inbounds nuw i8, ptr %next.gep, i32 2
+ %b = load <15 x i16>, ptr %a, align 2
+ %c = getelementptr inbounds nuw i8, ptr %next.gep, i32 6
+ %d = load <15 x i16>, ptr %c, align 2
+ %interleaved.vec = shufflevector <15 x i16> %b, <15 x i16> %d, <16 x i32> <i32 0, i32 15, i32 1, i32 16, i32 2, i32 17, i32 3, i32 18, i32 4, i32 19, i32 5, i32 20, i32 6, i32 21, i32 7, i32 22>
+ ret <16 x i16> %interleaved.vec
+}
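
vzip interleaves the two inputs lane by lane, which is the same zip pattern the new test_15xi16 mask follows (offset by its 15-element operand width). A minimal sketch using the mask from vzipi8, under the same RUN-line assumption (illustrative, not part of the patch):

define <8 x i8> @vzip_lo_sketch(<8 x i8> %a, <8 x i8> %b) nounwind {
; CHECK: vzip.8
  ; mask {0,8,1,9,2,10,3,11} zips a and b element by element, the first VZIP.8 result
  %z = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  ret <8 x i8> %z
}
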