Diffstat (limited to 'llvm/test/CodeGen/PowerPC')
 llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir |    4
 llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir   |    4
 llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir               |   12
 llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll                                 |   24
 llvm/test/CodeGen/PowerPC/aix-nest-param.ll                                 |   11
 llvm/test/CodeGen/PowerPC/aix-trampoline.ll                                 |   22
 llvm/test/CodeGen/PowerPC/alignlongjumptest.mir                             |    4
 llvm/test/CodeGen/PowerPC/block-placement-1.mir                             |    8
 llvm/test/CodeGen/PowerPC/block-placement.mir                               |    4
 llvm/test/CodeGen/PowerPC/builtins-bcd-format-conversion.ll                 |   40
 llvm/test/CodeGen/PowerPC/check-zero-vector.ll                              |  309
 llvm/test/CodeGen/PowerPC/collapse-rotates.mir                              |    4
 llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir   |   28
 llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir          |   80
 llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir                       |  356
 llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir                   |    4
 llvm/test/CodeGen/PowerPC/dmr-copy.ll                                       |  245
 llvm/test/CodeGen/PowerPC/dmr-enable.ll                                     |   63
 llvm/test/CodeGen/PowerPC/fp_to_uint_endian.ll                              |   17
 llvm/test/CodeGen/PowerPC/livevars-crash2.mir                               |    4
 llvm/test/CodeGen/PowerPC/llrint-conv.ll                                    |   50
 llvm/test/CodeGen/PowerPC/lower-scalar-mass-afn.ll                          |  160
 llvm/test/CodeGen/PowerPC/lrint-conv.ll                                     |   32
 llvm/test/CodeGen/PowerPC/milicode32.ll (renamed from llvm/test/CodeGen/PowerPC/memintr32.ll) |   36
 llvm/test/CodeGen/PowerPC/milicode64.ll (renamed from llvm/test/CodeGen/PowerPC/memintr64.ll) |   49
 llvm/test/CodeGen/PowerPC/mtvsrbmi.ll                                       |   87
 llvm/test/CodeGen/PowerPC/nofpclass.ll                                      |   15
 llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll                                 |   16
 llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir                              |   16
 llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir   |    4
 llvm/test/CodeGen/PowerPC/phi-eliminate.mir                                 |    4
 llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-fast.ll         |    2
 llvm/test/CodeGen/PowerPC/pow-025-075-nointrinsic-scalar-mass-fast.ll       |    2
 llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll                                 | 1007
 llvm/test/CodeGen/PowerPC/ppc_reduce_cr_logicals.ll                         |   88
 llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir                      |    4
 llvm/test/CodeGen/PowerPC/remove-implicit-use.mir                           |    4
 llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir             |    4
 llvm/test/CodeGen/PowerPC/remove-self-copies.mir                            |    4
 llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir                         |   24
 llvm/test/CodeGen/PowerPC/schedule-addi-load.mir                            |    4
 llvm/test/CodeGen/PowerPC/setcr_bc.mir                                      |    4
 llvm/test/CodeGen/PowerPC/setcr_bc2.mir                                     |    4
 llvm/test/CodeGen/PowerPC/setcr_bc3.mir                                     |    4
 llvm/test/CodeGen/PowerPC/stack-protector-target.ll                         |    4
 llvm/test/CodeGen/PowerPC/swaps-le-1.ll                                     |   40
 llvm/test/CodeGen/PowerPC/tls-picgot.ll                                     |   31
 llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir                           |    4
 llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir                           |    4
 llvm/test/CodeGen/PowerPC/two-address-crash.mir                             |    4
 llvm/test/CodeGen/PowerPC/vector-llrint.ll                                  | 1252
 llvm/test/CodeGen/PowerPC/vector-lrint.ll                                   | 1766
 llvm/test/CodeGen/PowerPC/vsro-vsr-vsrq-dag-combine.ll                      |  337
 llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll                             |   50
 llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll                             |   50
 55 files changed, 3586 insertions(+), 2823 deletions(-)
diff --git a/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir b/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir
index d3d3b19..a23f0af 100644
--- a/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir
+++ b/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir
@@ -123,8 +123,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
constants: []
diff --git a/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir b/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir
index e8ad54b..7d80c02 100644
--- a/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir
+++ b/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir
@@ -168,8 +168,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
constants: []
diff --git a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
index 41e2124..60a399d 100644
--- a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
+++ b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
@@ -1,6 +1,12 @@
# RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
# RUN: -o - | FileCheck %s
+# RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
+# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
+# RUN: llc -mcpu=pwr10 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
+# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
--- |
; ModuleID = 'a.ll'
@@ -30,7 +36,7 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #1
- attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
!llvm.ident = !{!0}
@@ -71,8 +77,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack:
- { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4,
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
index 9ffb4fd..258ddf6 100644
--- a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
@@ -37,9 +37,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8
; 32BIT: bb.0.entry:
; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6
; 32BIT-NEXT: {{ $}}
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6
; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3
; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3
;
@@ -47,9 +47,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8
; 64BIT: bb.0.entry:
; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6
; 64BIT-NEXT: {{ $}}
- ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
- ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
- ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3
; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3
; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
entry:
@@ -96,9 +96,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3
; 32BIT: bb.0.entry:
; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6
; 32BIT-NEXT: {{ $}}
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6
; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3
; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3
;
@@ -106,9 +106,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3
; 64BIT: bb.0.entry:
; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6
; 64BIT-NEXT: {{ $}}
- ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
- ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
- ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3
; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3
; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
entry:
diff --git a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
index 1863eaf..bfc7fbb 100644
--- a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
@@ -1,5 +1,5 @@
-; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
define ptr @nest_receiver(ptr nest %arg) nounwind {
ret ptr %arg
@@ -9,5 +9,10 @@ define ptr @nest_caller(ptr %arg) nounwind {
%result = call ptr @nest_receiver(ptr nest %arg)
ret ptr %result
}
+; CHECK-LABEL: .nest_receiver:
+; CHECK: mr 3, 11
+; CHECK: blr
-; CHECK: LLVM ERROR: Nest arguments are unimplemented.
+; CHECK-LABEL: .nest_caller:
+; CHECK: mr 11, 3
+; CHECK: bl .nest_receiver
diff --git a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
index b71f6b5..19df220 100644
--- a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
@@ -1,7 +1,7 @@
-; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-
-; CHECK: LLVM ERROR: INIT_TRAMPOLINE operation is not supported on AIX.
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | \
+; RUN: FileCheck %s --check-prefix=32BIT
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 -mattr=-altivec | \
+; RUN: FileCheck %s --check-prefix=64BIT
define void @create_trampoline(ptr %buffer, ptr %nval) nounwind {
entry:
@@ -12,3 +12,17 @@ entry:
declare i32 @nested(i32);
declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind
+
+; 32BIT: stw 4, 8(3)
+; 32BIT: lwz [[FuncDesc:[0-9]+]], L..C0(2)
+; 32BIT-DAG: lwz [[SCRATCH1:[0-9]+]], 0([[FuncDesc]])
+; 32BIT-DAG: lwz [[SCRATCH2:[0-9]+]], 4([[FuncDesc]])
+; 32BIT-DAG: stw [[SCRATCH1]], 0(3)
+; 32BIT-DAG: stw [[SCRATCH2]], 4(3)
+
+; 64BIT: std 4, 16(3)
+; 64BIT-DAG: ld [[FuncDesc:[0-9]+]], L..C0(2)
+; 64BIT-DAG: ld [[SCRATCH1:[0-9]+]], 0([[FuncDesc]])
+; 64BIT-DAG: ld [[SCRATCH2:[0-9]+]], 8([[FuncDesc]])
+; 64BIT-DAG: std [[SCRATCH1]], 0(3)
+; 64BIT-DAG: std [[SCRATCH2]], 8(3)
diff --git a/llvm/test/CodeGen/PowerPC/alignlongjumptest.mir b/llvm/test/CodeGen/PowerPC/alignlongjumptest.mir
index 314844a..f1ae4a2 100644
--- a/llvm/test/CodeGen/PowerPC/alignlongjumptest.mir
+++ b/llvm/test/CodeGen/PowerPC/alignlongjumptest.mir
@@ -43,8 +43,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/PowerPC/block-placement-1.mir b/llvm/test/CodeGen/PowerPC/block-placement-1.mir
index f91ab63..a74af48 100644
--- a/llvm/test/CodeGen/PowerPC/block-placement-1.mir
+++ b/llvm/test/CodeGen/PowerPC/block-placement-1.mir
@@ -140,8 +140,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
@@ -186,8 +186,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
- { id: 0, type: spill-slot, offset: -80, size: 8, alignment: 16, stack-id: default,
callee-saved-register: '$x30', callee-saved-restored: true, debug-info-variable: '',
diff --git a/llvm/test/CodeGen/PowerPC/block-placement.mir b/llvm/test/CodeGen/PowerPC/block-placement.mir
index dab8dfb..99d399d 100644
--- a/llvm/test/CodeGen/PowerPC/block-placement.mir
+++ b/llvm/test/CodeGen/PowerPC/block-placement.mir
@@ -111,8 +111,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/PowerPC/builtins-bcd-format-conversion.ll b/llvm/test/CodeGen/PowerPC/builtins-bcd-format-conversion.ll
new file mode 100644
index 0000000..ede8625
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/builtins-bcd-format-conversion.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-unknown -mcpu=pwr9 \
+; RUN: --ppc-asm-full-reg-names < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-unknown -mcpu=pwr9 \
+; RUN: --ppc-asm-full-reg-names < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64-ibm-aix-xcoff \
+; RUN: -ppc-asm-full-reg-names < %s | FileCheck %s
+
+define dso_local <16 x i8> @test_bcdcopysign(<16 x i8> noundef %a, <16 x i8> noundef %b) {
+; CHECK-LABEL: test_bcdcopysign:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: bcdcpsgn. v2, v2, v3
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call <16 x i8> @llvm.ppc.bcdcopysign(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %0
+}
+
+define dso_local <16 x i8> @test_bcdsetsign_imm0(<16 x i8> noundef %a) {
+; CHECK-LABEL: test_bcdsetsign_imm0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: bcdsetsgn. v2, v2, 0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call <16 x i8> @llvm.ppc.bcdsetsign(<16 x i8> %a, i32 0)
+ ret <16 x i8> %0
+}
+
+define dso_local <16 x i8> @test_bcdsetsign_imm1(<16 x i8> noundef %a) {
+; CHECK-LABEL: test_bcdsetsign_imm1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: bcdsetsgn. v2, v2, 1
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call <16 x i8> @llvm.ppc.bcdsetsign(<16 x i8> %a, i32 1)
+ ret <16 x i8> %0
+}
+
+declare <16 x i8> @llvm.ppc.bcdcopysign(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.ppc.bcdsetsign(<16 x i8>, i32)
diff --git a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll
index 59173e2..0f7e0c7 100644
--- a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll
+++ b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll
@@ -1,246 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \
-; RUN: < %s | FileCheck %s --check-prefix=POWERPC_64LE
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_64LE
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64-ibm-aix \
-; RUN: < %s | FileCheck %s --check-prefix=POWERPC_64
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_64
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc-ibm-aix \
-; RUN: < %s | FileCheck %s --check-prefix=POWERPC_32
-
-define i32 @test_Greater_than(ptr %colauths, i32 signext %ncols) {
-; This testcase is manually reduced to isolate the critical code blocks.
-; It is designed to check for vector comparison specifically for zero vectors.
-; In the vector.body section, we are expecting a comparison instruction (vcmpequh),
-; merge instructions (vmrghh and vmrglh) which use exactly 2 vectors.
-; The output of the merge instruction is being used by xxland and finally
-; accumulated by vadduwm instruction.
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_32
+define i32 @test_Greater_than(ptr %colauths) {
+; This testcase is for the special case of zero-vector comparisons.
+; Currently the generated code does a comparison (vcmpequh) and then a negation (xxlnor).
+; This pattern is expected to be optimized in a future patch.
; POWERPC_64LE-LABEL: test_Greater_than:
-; POWERPC_64LE: .LBB0_6: # %vector.body
-; POWERPC_64LE-NEXT: #
-; POWERPC_64LE-NEXT: lxv [[R1:[0-9]+]], -64(4)
-; POWERPC_64LE-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]]
-; POWERPC_64LE-NEXT: xxlnor [[R1]], [[R1]], [[R1]]
-; POWERPC_64LE-NEXT: vmrghh [[R4:[0-9]+]], [[R2]], [[R2]]
-; POWERPC_64LE-NEXT: vmrglh [[R2]], [[R2]], [[R2]]
-; POWERPC_64LE-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]]
-; POWERPC_64LE-NEXT: xxland [[R1]], [[R1]], [[R6]]
-; POWERPC_64LE-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]]
-; POWERPC_64LE: .LBB0_10: # %vec.epilog.vector.body
-; POWERPC_64LE-NEXT: #
-; POWERPC_64LE-NEXT: lxv [[R8:[0-9]+]], 0(4)
-; POWERPC_64LE-NEXT: addi 4, 4, 16
-; POWERPC_64LE-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]]
-; POWERPC_64LE-NEXT: xxlnor [[R8]], [[R8]], [[R8]]
-; POWERPC_64LE-NEXT: vmrglh [[R11:[0-9]+]], [[R9]], [[R9]]
-; POWERPC_64LE-NEXT: vmrghh [[R9]], [[R9]], [[R9]]
-; POWERPC_64LE-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]]
-; POWERPC_64LE-NEXT: xxland [[R8]], [[R8]], [[R6]]
-; POWERPC_64LE-NEXT: vadduwm [[R7]], [[R7]], [[R9]]
-; POWERPC_64LE-NEXT: vadduwm [[R3]], [[R3]], [[R11]]
-; POWERPC_64LE-NEXT: bdnz .LBB0_10
-; POWERPC_64LE: blr
+; POWERPC_64LE: # %bb.0: # %entry
+; POWERPC_64LE-NEXT: lfd f0, 0(r3)
+; POWERPC_64LE-NEXT: xxlxor v3, v3, v3
+; POWERPC_64LE-NEXT: li r4, 0
+; POWERPC_64LE-NEXT: li r3, 4
+; POWERPC_64LE-NEXT: xxswapd v2, f0
+; POWERPC_64LE-NEXT: vcmpequh v2, v2, v3
+; POWERPC_64LE-NEXT: xxlnor v2, v2, v2
+; POWERPC_64LE-NEXT: vmrglh v3, v2, v2
+; POWERPC_64LE-NEXT: vextuwrx r4, r4, v2
+; POWERPC_64LE-NEXT: vextuwrx r3, r3, v3
+; POWERPC_64LE-NEXT: clrlwi r4, r4, 31
+; POWERPC_64LE-NEXT: rlwimi r4, r3, 1, 30, 30
+; POWERPC_64LE-NEXT: mfvsrwz r3, v3
+; POWERPC_64LE-NEXT: rlwimi r4, r3, 2, 29, 29
+; POWERPC_64LE-NEXT: li r3, 12
+; POWERPC_64LE-NEXT: vextuwrx r3, r3, v3
+; POWERPC_64LE-NEXT: rlwimi r4, r3, 3, 28, 28
+; POWERPC_64LE-NEXT: stb r4, -1(r1)
+; POWERPC_64LE-NEXT: lbz r3, -1(r1)
+; POWERPC_64LE-NEXT: popcntd r3, r3
+; POWERPC_64LE-NEXT: blr
;
; POWERPC_64-LABEL: test_Greater_than:
-; POWERPC_64: L..BB0_6: # %vector.body
-; POWERPC_64-NEXT: #
-; POWERPC_64-NEXT: lxv [[R1:[0-9]+]], -64(4)
-; POWERPC_64-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]]
-; POWERPC_64-NEXT: xxlnor [[R1]], [[R1]], [[R1]]
-; POWERPC_64-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]]
-; POWERPC_64-NEXT: vmrghh [[R2]], [[R2]], [[R2]]
-; POWERPC_64-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]]
-; POWERPC_64-NEXT: xxland [[R1]], [[R1]], [[R6]]
-; POWERPC_64-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]]
-; POWERPC_64: L..BB0_10: # %vec.epilog.vector.body
-; POWERPC_64-NEXT: #
-; POWERPC_64-NEXT: lxv [[R8:[0-9]+]], 0(4)
-; POWERPC_64-NEXT: addi 4, 4, 16
-; POWERPC_64-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]]
-; POWERPC_64-NEXT: xxlnor [[R8]], [[R8]], [[R8]]
-; POWERPC_64-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]]
-; POWERPC_64-NEXT: vmrglh [[R9]], [[R9]], [[R9]]
-; POWERPC_64-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]]
-; POWERPC_64-NEXT: xxland [[R8]], [[R8]], [[R6]]
-; POWERPC_64-NEXT: vadduwm [[R7]], [[R7]], [[R9]]
-; POWERPC_64-NEXT: vadduwm [[R3]], [[R3]], [[R11]]
-; POWERPC_64-NEXT: bdnz L..BB0_10
-; POWERPC_64: blr
+; POWERPC_64: # %bb.0: # %entry
+; POWERPC_64-NEXT: lxsd v2, 0(r3)
+; POWERPC_64-NEXT: xxlxor v3, v3, v3
+; POWERPC_64-NEXT: li r4, 12
+; POWERPC_64-NEXT: li r3, 8
+; POWERPC_64-NEXT: vcmpequh v2, v2, v3
+; POWERPC_64-NEXT: xxlnor v2, v2, v2
+; POWERPC_64-NEXT: vmrghh v2, v2, v2
+; POWERPC_64-NEXT: vextuwlx r4, r4, v2
+; POWERPC_64-NEXT: vextuwlx r3, r3, v2
+; POWERPC_64-NEXT: clrlwi r4, r4, 31
+; POWERPC_64-NEXT: rlwimi r4, r3, 1, 30, 30
+; POWERPC_64-NEXT: mfvsrwz r3, v2
+; POWERPC_64-NEXT: rlwimi r4, r3, 2, 29, 29
+; POWERPC_64-NEXT: li r3, 0
+; POWERPC_64-NEXT: vextuwlx r3, r3, v2
+; POWERPC_64-NEXT: rlwimi r4, r3, 3, 28, 28
+; POWERPC_64-NEXT: stb r4, -1(r1)
+; POWERPC_64-NEXT: lbz r3, -1(r1)
+; POWERPC_64-NEXT: popcntd r3, r3
+; POWERPC_64-NEXT: blr
;
; POWERPC_32-LABEL: test_Greater_than:
-; POWERPC_32: L..BB0_7: # %vector.body
-; POWERPC_32-NEXT: #
-; POWERPC_32-NEXT: lxv [[R1:[0-9]+]], 0(10)
-; POWERPC_32-NEXT: addic [[R13:[0-9]+]], [[R13]], 64
-; POWERPC_32-NEXT: addze [[R14:[0-9]+]], [[R14]]
-; POWERPC_32-NEXT: xor [[R15:[0-9]+]], [[R13]], [[R16:[0-9]+]]
-; POWERPC_32-NEXT: or. [[R15]], [[R15]], [[R14]]
-; POWERPC_32-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]]
-; POWERPC_32-NEXT: xxlnor [[R1]], [[R1]], [[R1]]
-; POWERPC_32-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]]
-; POWERPC_32-NEXT: vmrghh [[R2]], [[R2]], [[R2]]
-; POWERPC_32-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]]
-; POWERPC_32-NEXT: xxland [[R1]], [[R1]], [[R6]]
-; POWERPC_32-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]]
-; POWERPC_32: L..BB0_11: # %vec.epilog.vector.body
-; POWERPC_32-NEXT: #
-; POWERPC_32-NEXT: slwi [[R14]], [[R13]], 1
-; POWERPC_32-NEXT: addic [[R13]], [[R13]], 8
-; POWERPC_32-NEXT: addze [[R17:[0-9]+]], [[R17]]
-; POWERPC_32-NEXT: lxvx [[R8:[0-9]+]], [[R18:[0-9]+]], [[R14]]
-; POWERPC_32-NEXT: xor [[R14]], [[R13]], [[R16]]
-; POWERPC_32-NEXT: or. [[R14]], [[R14]], [[R17]]
-; POWERPC_32-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R3]]
-; POWERPC_32-NEXT: xxlnor [[R8]], [[R8]], [[R8]]
-; POWERPC_32-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]]
-; POWERPC_32-NEXT: vmrglh [[R9]], [[R9]], [[R9]]
-; POWERPC_32-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]]
-; POWERPC_32-NEXT: xxland [[R8]], [[R8]], [[R6]]
-; POWERPC_32-NEXT: vadduwm [[R7]], [[R7]], [[R9]]
-; POWERPC_32-NEXT: vadduwm [[R19:[0-9]+]], [[R19]], [[R11]]
-; POWERPC_32-NEXT: bne 0, L..BB0_11
-; POWERPC_32: blr
- entry:
- %cmp5 = icmp sgt i32 %ncols, 0
- br i1 %cmp5, label %iter.check, label %for.cond.cleanup
-
-iter.check: ; preds = %entry
- %wide.trip.count = zext nneg i32 %ncols to i64
- %min.iters.check = icmp ult i32 %ncols, 8
- br i1 %min.iters.check, label %for.body.preheader, label %vector.main.loop.iter.check
-
-for.body.preheader: ; preds = %vec.epilog.iter.check, %vec.epilog.middle.block, %iter.check
- %indvars.iv.ph = phi i64 [ 0, %iter.check ], [ %n.vec, %vec.epilog.iter.check ], [ %n.vec31, %vec.epilog.middle.block ]
- %num_cols_needed.06.ph = phi i32 [ 0, %iter.check ], [ %33, %vec.epilog.iter.check ], [ %40, %vec.epilog.middle.block ]
- br label %for.body
-
-vector.main.loop.iter.check: ; preds = %iter.check
- %min.iters.check9 = icmp ult i32 %ncols, 64
- br i1 %min.iters.check9, label %vec.epilog.ph, label %vector.ph
-
-vector.ph: ; preds = %vector.main.loop.iter.check
- %n.vec = and i64 %wide.trip.count, 2147483584
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %vec.phi = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %24, %vector.body ]
- %vec.phi10 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %25, %vector.body ]
- %vec.phi11 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %26, %vector.body ]
- %vec.phi12 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %27, %vector.body ]
- %vec.phi13 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %28, %vector.body ]
- %vec.phi14 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %29, %vector.body ]
- %vec.phi15 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %30, %vector.body ]
- %vec.phi16 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %31, %vector.body ]
- %0 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index
- %1 = getelementptr inbounds nuw i8, ptr %0, i64 16
- %2 = getelementptr inbounds nuw i8, ptr %0, i64 32
- %3 = getelementptr inbounds nuw i8, ptr %0, i64 48
- %4 = getelementptr inbounds nuw i8, ptr %0, i64 64
- %5 = getelementptr inbounds nuw i8, ptr %0, i64 80
- %6 = getelementptr inbounds nuw i8, ptr %0, i64 96
- %7 = getelementptr inbounds nuw i8, ptr %0, i64 112
- %wide.load = load <8 x i16>, ptr %0, align 2, !tbaa !5
- %wide.load17 = load <8 x i16>, ptr %1, align 2, !tbaa !5
- %wide.load18 = load <8 x i16>, ptr %2, align 2, !tbaa !5
- %wide.load19 = load <8 x i16>, ptr %3, align 2, !tbaa !5
- %wide.load20 = load <8 x i16>, ptr %4, align 2, !tbaa !5
- %wide.load21 = load <8 x i16>, ptr %5, align 2, !tbaa !5
- %wide.load22 = load <8 x i16>, ptr %6, align 2, !tbaa !5
- %wide.load23 = load <8 x i16>, ptr %7, align 2, !tbaa !5
- %8 = icmp ne <8 x i16> %wide.load, zeroinitializer
- %9 = icmp ne <8 x i16> %wide.load17, zeroinitializer
- %10 = icmp ne <8 x i16> %wide.load18, zeroinitializer
- %11 = icmp ne <8 x i16> %wide.load19, zeroinitializer
- %12 = icmp ne <8 x i16> %wide.load20, zeroinitializer
- %13 = icmp ne <8 x i16> %wide.load21, zeroinitializer
- %14 = icmp ne <8 x i16> %wide.load22, zeroinitializer
- %15 = icmp ne <8 x i16> %wide.load23, zeroinitializer
- %16 = zext <8 x i1> %8 to <8 x i32>
- %17 = zext <8 x i1> %9 to <8 x i32>
- %18 = zext <8 x i1> %10 to <8 x i32>
- %19 = zext <8 x i1> %11 to <8 x i32>
- %20 = zext <8 x i1> %12 to <8 x i32>
- %21 = zext <8 x i1> %13 to <8 x i32>
- %22 = zext <8 x i1> %14 to <8 x i32>
- %23 = zext <8 x i1> %15 to <8 x i32>
- %24 = add <8 x i32> %vec.phi, %16
- %25 = add <8 x i32> %vec.phi10, %17
- %26 = add <8 x i32> %vec.phi11, %18
- %27 = add <8 x i32> %vec.phi12, %19
- %28 = add <8 x i32> %vec.phi13, %20
- %29 = add <8 x i32> %vec.phi14, %21
- %30 = add <8 x i32> %vec.phi15, %22
- %31 = add <8 x i32> %vec.phi16, %23
- %index.next = add nuw i64 %index, 64
- %32 = icmp eq i64 %index.next, %n.vec
- br i1 %32, label %middle.block, label %vector.body, !llvm.loop !9
-
-middle.block: ; preds = %vector.body
- %bin.rdx = add <8 x i32> %25, %24
- %bin.rdx24 = add <8 x i32> %26, %bin.rdx
- %bin.rdx25 = add <8 x i32> %27, %bin.rdx24
- %bin.rdx26 = add <8 x i32> %28, %bin.rdx25
- %bin.rdx27 = add <8 x i32> %29, %bin.rdx26
- %bin.rdx28 = add <8 x i32> %30, %bin.rdx27
- %bin.rdx29 = add <8 x i32> %31, %bin.rdx28
- %33 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %bin.rdx29)
- %cmp.n = icmp eq i64 %n.vec, %wide.trip.count
- br i1 %cmp.n, label %for.cond.cleanup, label %vec.epilog.iter.check
-
-vec.epilog.iter.check: ; preds = %middle.block
- %n.vec.remaining = and i64 %wide.trip.count, 56
- %min.epilog.iters.check = icmp eq i64 %n.vec.remaining, 0
- br i1 %min.epilog.iters.check, label %for.body.preheader, label %vec.epilog.ph
-
-vec.epilog.ph: ; preds = %vec.epilog.iter.check, %vector.main.loop.iter.check
- %vec.epilog.resume.val = phi i64 [ %n.vec, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ]
- %bc.merge.rdx = phi i32 [ %33, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ]
- %n.vec31 = and i64 %wide.trip.count, 2147483640
- %34 = insertelement <8 x i32> <i32 poison, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, i32 %bc.merge.rdx, i64 0
- br label %vec.epilog.vector.body
-
-vec.epilog.vector.body: ; preds = %vec.epilog.vector.body, %vec.epilog.ph
- %index32 = phi i64 [ %vec.epilog.resume.val, %vec.epilog.ph ], [ %index.next35, %vec.epilog.vector.body ]
- %vec.phi33 = phi <8 x i32> [ %34, %vec.epilog.ph ], [ %38, %vec.epilog.vector.body ]
- %35 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index32
- %wide.load34 = load <8 x i16>, ptr %35, align 2, !tbaa !5
- %36 = icmp ne <8 x i16> %wide.load34, zeroinitializer
- %37 = zext <8 x i1> %36 to <8 x i32>
- %38 = add <8 x i32> %vec.phi33, %37
- %index.next35 = add nuw i64 %index32, 8
- %39 = icmp eq i64 %index.next35, %n.vec31
- br i1 %39, label %vec.epilog.middle.block, label %vec.epilog.vector.body, !llvm.loop !13
-
-vec.epilog.middle.block: ; preds = %vec.epilog.vector.body
- %40 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %38)
- %cmp.n36 = icmp eq i64 %n.vec31, %wide.trip.count
- br i1 %cmp.n36, label %for.cond.cleanup, label %for.body.preheader
-
-for.cond.cleanup: ; preds = %for.body, %middle.block, %vec.epilog.middle.block, %entry
- %num_cols_needed.0.lcssa = phi i32 [ 0, %entry ], [ %33, %middle.block ], [ %40, %vec.epilog.middle.block ], [ %spec.select, %for.body ]
- ret i32 %num_cols_needed.0.lcssa
-
-for.body: ; preds = %for.body.preheader, %for.body
- %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
- %num_cols_needed.06 = phi i32 [ %spec.select, %for.body ], [ %num_cols_needed.06.ph, %for.body.preheader ]
- %arrayidx = getelementptr inbounds nuw i16, ptr %colauths, i64 %indvars.iv
- %41 = load i16, ptr %arrayidx, align 2, !tbaa !5
- %tobool.not = icmp ne i16 %41, 0
- %inc = zext i1 %tobool.not to i32
- %spec.select = add nuw nsw i32 %num_cols_needed.06, %inc
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
- br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !14
+; POWERPC_32: # %bb.0: # %entry
+; POWERPC_32-NEXT: li r4, 4
+; POWERPC_32-NEXT: lxvwsx vs1, 0, r3
+; POWERPC_32-NEXT: xxlxor v3, v3, v3
+; POWERPC_32-NEXT: lxvwsx vs0, r3, r4
+; POWERPC_32-NEXT: xxmrghw v2, vs1, vs0
+; POWERPC_32-NEXT: vcmpequh v2, v2, v3
+; POWERPC_32-NEXT: xxlnor v2, v2, v2
+; POWERPC_32-NEXT: vmrghh v2, v2, v2
+; POWERPC_32-NEXT: stxv v2, -32(r1)
+; POWERPC_32-NEXT: lwz r3, -20(r1)
+; POWERPC_32-NEXT: lwz r4, -24(r1)
+; POWERPC_32-NEXT: clrlwi r3, r3, 31
+; POWERPC_32-NEXT: rlwimi r3, r4, 1, 30, 30
+; POWERPC_32-NEXT: lwz r4, -28(r1)
+; POWERPC_32-NEXT: rlwimi r3, r4, 2, 29, 29
+; POWERPC_32-NEXT: lwz r4, -32(r1)
+; POWERPC_32-NEXT: rlwimi r3, r4, 3, 28, 28
+; POWERPC_32-NEXT: popcntw r3, r3
+; POWERPC_32-NEXT: blr
+entry:
+ %0 = load <4 x i16>, ptr %colauths, align 2, !tbaa !5
+ %1 = icmp ne <4 x i16> %0, zeroinitializer
+ %2 = bitcast <4 x i1> %1 to i4
+ %3 = tail call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 %2)
+ %4 = zext nneg i4 %3 to i32
+ ret i32 %4
}
+declare i4 @llvm.ctpop.i4(i4) #1
+
!5 = !{!6, !6, i64 0}
!6 = !{!"short", !7, i64 0}
!7 = !{!"omnipotent char", !8, i64 0}
!8 = !{!"Simple C/C++ TBAA"}
-!9 = distinct !{!9, !10, !11, !12}
-!10 = !{!"llvm.loop.mustprogress"}
-!11 = !{!"llvm.loop.isvectorized", i32 1}
-!12 = !{!"llvm.loop.unroll.runtime.disable"}
-!13 = distinct !{!13, !10, !11, !12}
-!14 = distinct !{!14, !10, !12, !11}
diff --git a/llvm/test/CodeGen/PowerPC/collapse-rotates.mir b/llvm/test/CodeGen/PowerPC/collapse-rotates.mir
index 938b27f..b30b161 100644
--- a/llvm/test/CodeGen/PowerPC/collapse-rotates.mir
+++ b/llvm/test/CodeGen/PowerPC/collapse-rotates.mir
@@ -45,8 +45,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
constants: []
diff --git a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
index e1d0285..fac09d2 100644
--- a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
+++ b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
@@ -111,8 +111,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -165,8 +165,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -219,8 +219,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -272,8 +272,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -322,8 +322,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -370,8 +370,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -417,8 +417,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
diff --git a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir
index cdd6be5..0b61455 100644
--- a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir
+++ b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir
@@ -242,8 +242,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -292,8 +292,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -348,8 +348,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -411,8 +411,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -470,8 +470,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -528,8 +528,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -590,8 +590,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -648,8 +648,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -707,8 +707,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -765,8 +765,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -821,8 +821,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -876,8 +876,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -931,8 +931,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -986,8 +986,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1040,8 +1040,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1093,8 +1093,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1146,8 +1146,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1199,8 +1199,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1252,8 +1252,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1305,8 +1305,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
diff --git a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
index fa06dd5..61c0da6 100644
--- a/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
+++ b/llvm/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
@@ -1044,8 +1044,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1100,8 +1100,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1160,8 +1160,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1221,8 +1221,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1280,8 +1280,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1335,8 +1335,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1385,8 +1385,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1439,8 +1439,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1494,8 +1494,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1548,8 +1548,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1602,8 +1602,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1655,8 +1655,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1708,8 +1708,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1762,8 +1762,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1818,8 +1818,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1877,8 +1877,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -1938,8 +1938,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2002,8 +2002,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2072,8 +2072,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2149,8 +2149,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2229,8 +2229,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2306,8 +2306,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2384,8 +2384,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2461,8 +2461,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2542,8 +2542,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2621,8 +2621,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2697,8 +2697,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2772,8 +2772,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2845,8 +2845,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2920,8 +2920,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -2993,8 +2993,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3077,8 +3077,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
- { id: 0, name: '', type: default, offset: 0, size: 16, alignment: 16,
@@ -3183,8 +3183,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3256,8 +3256,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3329,8 +3329,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3402,8 +3402,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3465,8 +3465,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3515,8 +3515,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3562,8 +3562,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3608,8 +3608,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3658,8 +3658,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3713,8 +3713,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3768,8 +3768,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3823,8 +3823,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3874,8 +3874,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3920,8 +3920,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -3970,8 +3970,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4024,8 +4024,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4078,8 +4078,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4131,8 +4131,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4183,8 +4183,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4235,8 +4235,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4284,8 +4284,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4339,8 +4339,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4404,8 +4404,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4467,8 +4467,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4522,8 +4522,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4575,8 +4575,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4628,8 +4628,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4681,8 +4681,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4739,8 +4739,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4797,8 +4797,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4859,8 +4859,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4917,8 +4917,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -4976,8 +4976,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5036,8 +5036,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5091,8 +5091,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5144,8 +5144,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5209,8 +5209,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5282,8 +5282,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5357,8 +5357,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5430,8 +5430,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5505,8 +5505,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5578,8 +5578,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5652,8 +5652,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5723,8 +5723,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5794,8 +5794,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5867,8 +5867,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -5938,8 +5938,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6011,8 +6011,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6074,8 +6074,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6126,8 +6126,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6178,8 +6178,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6236,8 +6236,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6297,8 +6297,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6351,8 +6351,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6401,8 +6401,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6448,8 +6448,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -6494,8 +6494,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
diff --git a/llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir b/llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir
index 651869d..668e7fe 100644
--- a/llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir
+++ b/llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir
@@ -117,8 +117,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/PowerPC/dmr-copy.ll b/llvm/test/CodeGen/PowerPC/dmr-copy.ll
new file mode 100644
index 0000000..d5a2430
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/dmr-copy.ll
@@ -0,0 +1,245 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN: -mcpu=future -ppc-asm-full-reg-names \
+; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-ibm-aix \
+; RUN: -mcpu=future -ppc-asm-full-reg-names \
+; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=CHECK-BE
+
+define void @test_wacc_copy(ptr noundef %vdmrp, ptr noundef %vpp, <16 x i8> noundef %vc, ptr noundef %resp) #0 {
+; CHECK-LABEL: test_wacc_copy:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: std r31, -8(r1)
+; CHECK-NEXT: std r30, -16(r1)
+; CHECK-NEXT: mr r30, r1
+; CHECK-NEXT: clrldi r0, r1, 57
+; CHECK-NEXT: subfic r0, r0, -384
+; CHECK-NEXT: stdux r1, r1, r0
+; CHECK-NEXT: .cfi_def_cfa_register r30
+; CHECK-NEXT: .cfi_offset r31, -8
+; CHECK-NEXT: .cfi_offset r30, -16
+; CHECK-NEXT: mr r31, r1
+; CHECK-NEXT: std r3, 360(r31)
+; CHECK-NEXT: std r4, 352(r31)
+; CHECK-NEXT: stxv v2, 336(r31)
+; CHECK-NEXT: std r7, 328(r31)
+; CHECK-NEXT: ld r3, 360(r31)
+; CHECK-NEXT: lxvp vsp34, 0(r3)
+; CHECK-NEXT: lxvp vsp36, 32(r3)
+; CHECK-NEXT: dmxxinstdmr512 wacc_hi0, vsp36, vsp34, 1
+; CHECK-NEXT: lxvp vsp34, 64(r3)
+; CHECK-NEXT: lxvp vsp36, 96(r3)
+; CHECK-NEXT: dmxxinstdmr512 wacc0, vsp36, vsp34, 0
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-NEXT: stxvp vsp34, 224(r31)
+; CHECK-NEXT: stxvp vsp36, 192(r31)
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-NEXT: stxvp vsp34, 160(r31)
+; CHECK-NEXT: stxvp vsp36, 128(r31)
+; CHECK-NEXT: ld r3, 352(r31)
+; CHECK-NEXT: lxv v2, 16(r3)
+; CHECK-NEXT: lxv v3, 0(r3)
+; CHECK-NEXT: stxv v2, 112(r31)
+; CHECK-NEXT: stxv v3, 96(r31)
+; CHECK-NEXT: lxv v2, 112(r31)
+; CHECK-NEXT: lxv v3, 96(r31)
+; CHECK-NEXT: lxv vs0, 336(r31)
+; CHECK-NEXT: dmxvi8gerx4 dmr0, vsp34, vs0
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-NEXT: stxvp vsp34, 224(r31)
+; CHECK-NEXT: stxvp vsp36, 192(r31)
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-NEXT: stxvp vsp34, 160(r31)
+; CHECK-NEXT: stxvp vsp36, 128(r31)
+; CHECK-NEXT: lxvp vsp34, 128(r31)
+; CHECK-NEXT: lxvp vsp36, 160(r31)
+; CHECK-NEXT: dmxxinstdmr512 wacc_hi0, vsp36, vsp34, 1
+; CHECK-NEXT: lxvp vsp34, 192(r31)
+; CHECK-NEXT: lxvp vsp36, 224(r31)
+; CHECK-NEXT: dmxxinstdmr512 wacc0, vsp36, vsp34, 0
+; CHECK-NEXT: ld r3, 328(r31)
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-NEXT: stxvp vsp34, 96(r3)
+; CHECK-NEXT: stxvp vsp36, 64(r3)
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-NEXT: stxvp vsp34, 32(r3)
+; CHECK-NEXT: stxvp vsp36, 0(r3)
+; CHECK-NEXT: mr r1, r30
+; CHECK-NEXT: ld r31, -8(r1)
+; CHECK-NEXT: ld r30, -16(r1)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test_wacc_copy:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: std r31, -8(r1)
+; CHECK-BE-NEXT: std r30, -16(r1)
+; CHECK-BE-NEXT: mr r30, r1
+; CHECK-BE-NEXT: clrldi r0, r1, 57
+; CHECK-BE-NEXT: subfic r0, r0, -384
+; CHECK-BE-NEXT: stdux r1, r1, r0
+; CHECK-BE-NEXT: mr r31, r1
+; CHECK-BE-NEXT: std r3, 360(r31)
+; CHECK-BE-NEXT: std r4, 352(r31)
+; CHECK-BE-NEXT: stxv v2, 336(r31)
+; CHECK-BE-NEXT: std r5, 328(r31)
+; CHECK-BE-NEXT: ld r3, 360(r31)
+; CHECK-BE-NEXT: lxvp vsp34, 96(r3)
+; CHECK-BE-NEXT: lxvp vsp36, 64(r3)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc_hi0, vsp36, vsp34, 1
+; CHECK-BE-NEXT: lxvp vsp34, 32(r3)
+; CHECK-BE-NEXT: lxvp vsp36, 0(r3)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc0, vsp36, vsp34, 0
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-BE-NEXT: stxvp vsp36, 224(r31)
+; CHECK-BE-NEXT: stxvp vsp34, 192(r31)
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-BE-NEXT: stxvp vsp36, 160(r31)
+; CHECK-BE-NEXT: stxvp vsp34, 128(r31)
+; CHECK-BE-NEXT: ld r3, 352(r31)
+; CHECK-BE-NEXT: lxv v2, 0(r3)
+; CHECK-BE-NEXT: lxv v3, 16(r3)
+; CHECK-BE-NEXT: stxv v3, 112(r31)
+; CHECK-BE-NEXT: stxv v2, 96(r31)
+; CHECK-BE-NEXT: lxv v2, 96(r31)
+; CHECK-BE-NEXT: lxv v3, 112(r31)
+; CHECK-BE-NEXT: lxv vs0, 336(r31)
+; CHECK-BE-NEXT: dmxvi8gerx4 dmr0, vsp34, vs0
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-BE-NEXT: stxvp vsp36, 224(r31)
+; CHECK-BE-NEXT: stxvp vsp34, 192(r31)
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-BE-NEXT: stxvp vsp36, 160(r31)
+; CHECK-BE-NEXT: stxvp vsp34, 128(r31)
+; CHECK-BE-NEXT: lxvp vsp34, 224(r31)
+; CHECK-BE-NEXT: lxvp vsp36, 192(r31)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc_hi0, vsp36, vsp34, 1
+; CHECK-BE-NEXT: lxvp vsp34, 160(r31)
+; CHECK-BE-NEXT: lxvp vsp36, 128(r31)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc0, vsp36, vsp34, 0
+; CHECK-BE-NEXT: ld r3, 328(r31)
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-BE-NEXT: stxvp vsp36, 96(r3)
+; CHECK-BE-NEXT: stxvp vsp34, 64(r3)
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-BE-NEXT: stxvp vsp36, 32(r3)
+; CHECK-BE-NEXT: stxvp vsp34, 0(r3)
+; CHECK-BE-NEXT: mr r1, r30
+; CHECK-BE-NEXT: ld r31, -8(r1)
+; CHECK-BE-NEXT: ld r30, -16(r1)
+; CHECK-BE-NEXT: blr
+entry:
+ %vdmrp.addr = alloca ptr, align 8
+ %vpp.addr = alloca ptr, align 8
+ %vc.addr = alloca <16 x i8>, align 16
+ %resp.addr = alloca ptr, align 8
+ %vdmr = alloca <1024 x i1>, align 128
+ %vp = alloca <256 x i1>, align 32
+ store ptr %vdmrp, ptr %vdmrp.addr, align 8
+ store ptr %vpp, ptr %vpp.addr, align 8
+ store <16 x i8> %vc, ptr %vc.addr, align 16
+ store ptr %resp, ptr %resp.addr, align 8
+ %0 = load ptr, ptr %vdmrp.addr, align 8
+ %1 = load <1024 x i1>, ptr %0, align 128
+ store <1024 x i1> %1, ptr %vdmr, align 128
+ %2 = load ptr, ptr %vpp.addr, align 8
+ %3 = load <256 x i1>, ptr %2, align 32
+ store <256 x i1> %3, ptr %vp, align 32
+ %4 = load <256 x i1>, ptr %vp, align 32
+ %5 = load <16 x i8>, ptr %vc.addr, align 16
+ %6 = call <1024 x i1> @llvm.ppc.mma.dmxvi8gerx4(<256 x i1> %4, <16 x i8> %5)
+ store <1024 x i1> %6, ptr %vdmr, align 128
+ %7 = load <1024 x i1>, ptr %vdmr, align 128
+ %8 = load ptr, ptr %resp.addr, align 8
+ store <1024 x i1> %7, ptr %8, align 128
+ ret void
+}
+
+define void @foo(ptr noundef readonly captures(none) %p1, ptr noundef readonly captures(none) %p2, ptr noundef writeonly captures(none) initializes((0, 128)) %res1, ptr noundef writeonly captures(none) initializes((0, 128)) %res2) local_unnamed_addr #0 {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: dmsetdmrz dmr0
+; CHECK-NEXT: lxvp vsp34, 0(r3)
+; CHECK-NEXT: lxvp vsp36, 32(r3)
+; CHECK-NEXT: dmxxinstdmr512 wacc_hi1, vsp36, vsp34, 1
+; CHECK-NEXT: lxvp vsp34, 64(r3)
+; CHECK-NEXT: lxvp vsp36, 96(r3)
+; CHECK-NEXT: dmxxinstdmr512 wacc1, vsp36, vsp34, 0
+; CHECK-NEXT: dmmr dmr2, dmr0
+; CHECK-NEXT: dmxor dmr2, dmr1
+; CHECK-NEXT: lxvp vsp34, 0(r4)
+; CHECK-NEXT: lxvp vsp36, 32(r4)
+; CHECK-NEXT: dmxxinstdmr512 wacc_hi1, vsp36, vsp34, 1
+; CHECK-NEXT: lxvp vsp34, 64(r4)
+; CHECK-NEXT: lxvp vsp36, 96(r4)
+; CHECK-NEXT: dmxxinstdmr512 wacc1, vsp36, vsp34, 0
+; CHECK-NEXT: dmxor dmr0, dmr1
+; CHECK-NEXT: dmmr dmr1, dmr2
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc1, 0
+; CHECK-NEXT: stxvp vsp34, 96(r5)
+; CHECK-NEXT: stxvp vsp36, 64(r5)
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi1, 1
+; CHECK-NEXT: stxvp vsp34, 32(r5)
+; CHECK-NEXT: stxvp vsp36, 0(r5)
+; CHECK-NEXT: dmmr dmr0, dmr0
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-NEXT: stxvp vsp34, 96(r6)
+; CHECK-NEXT: stxvp vsp36, 64(r6)
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-NEXT: stxvp vsp34, 32(r6)
+; CHECK-NEXT: stxvp vsp36, 0(r6)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: foo:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: dmsetdmrz dmr0
+; CHECK-BE-NEXT: lxvp vsp34, 96(r3)
+; CHECK-BE-NEXT: lxvp vsp36, 64(r3)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc_hi1, vsp36, vsp34, 1
+; CHECK-BE-NEXT: lxvp vsp34, 32(r3)
+; CHECK-BE-NEXT: lxvp vsp36, 0(r3)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc1, vsp36, vsp34, 0
+; CHECK-BE-NEXT: dmmr dmr2, dmr0
+; CHECK-BE-NEXT: dmxor dmr2, dmr1
+; CHECK-BE-NEXT: lxvp vsp34, 96(r4)
+; CHECK-BE-NEXT: lxvp vsp36, 64(r4)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc_hi1, vsp36, vsp34, 1
+; CHECK-BE-NEXT: lxvp vsp34, 32(r4)
+; CHECK-BE-NEXT: lxvp vsp36, 0(r4)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc1, vsp36, vsp34, 0
+; CHECK-BE-NEXT: dmxor dmr0, dmr1
+; CHECK-BE-NEXT: dmmr dmr1, dmr2
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi1, 1
+; CHECK-BE-NEXT: stxvp vsp36, 96(r5)
+; CHECK-BE-NEXT: stxvp vsp34, 64(r5)
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc1, 0
+; CHECK-BE-NEXT: stxvp vsp36, 32(r5)
+; CHECK-BE-NEXT: stxvp vsp34, 0(r5)
+; CHECK-BE-NEXT: dmmr dmr0, dmr0
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-BE-NEXT: stxvp vsp36, 96(r6)
+; CHECK-BE-NEXT: stxvp vsp34, 64(r6)
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-BE-NEXT: stxvp vsp36, 32(r6)
+; CHECK-BE-NEXT: stxvp vsp34, 0(r6)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = tail call <1024 x i1> @llvm.ppc.mma.dmsetdmrz()
+ %1 = load <1024 x i1>, ptr %p1, align 128
+ %2 = tail call <1024 x i1> @llvm.ppc.mma.dmxor(<1024 x i1> %0, <1024 x i1> %1)
+ %3 = load <1024 x i1>, ptr %p2, align 128
+ %4 = tail call <1024 x i1> @llvm.ppc.mma.dmxor(<1024 x i1> %0, <1024 x i1> %3)
+ %5 = tail call <1024 x i1> @llvm.ppc.mma.dmmr(<1024 x i1> %2)
+ store <1024 x i1> %5, ptr %res1, align 128
+ %6 = tail call <1024 x i1> @llvm.ppc.mma.dmmr(<1024 x i1> %4)
+ store <1024 x i1> %6, ptr %res2, align 128
+ ret void
+}
+
+declare <1024 x i1> @llvm.ppc.mma.dmsetdmrz()
+declare <1024 x i1> @llvm.ppc.mma.dmxor(<1024 x i1>, <1024 x i1>)
+declare <1024 x i1> @llvm.ppc.mma.dmmr(<1024 x i1>)
+declare <1024 x i1> @llvm.ppc.mma.dmxvi8gerx4(<256 x i1>, <16 x i8>)
+
+attributes #0 = { noinline nounwind optnone uwtable "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="future" "target-features"="+64bit,+allow-unaligned-fp-access,+altivec,+bpermd,+cmpb,+crbits,+crypto,+direct-move,+extdiv,+fast-MFLR,+fcpsgn,+fpcvt,+fprnd,+fpu,+fre,+fres,+frsqrte,+frsqrtes,+fsqrt,+fuse-add-logical,+fuse-arith-add,+fuse-logical,+fuse-logical-add,+fuse-sha3,+fuse-store,+fusion,+hard-float,+icbt,+isa-future-instructions,+isa-v206-instructions,+isa-v207-instructions,+isa-v30-instructions,+isa-v31-instructions,+isel,+ldbrx,+lfiwax,+mfocrf,+mma,+paired-vector-memops,+partword-atomics,+pcrelative-memops,+popcntd,+power10-vector,+power8-altivec,+power8-vector,+power9-altivec,+power9-vector,+ppc-postra-sched,+ppc-prera-sched,+predictable-select-expensive,+prefix-instrs,+quadword-atomics,+recipprec,+stfiwx,+two-const-nr,+vsx" }
+
+
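
The new dmr-copy.ll test above exercises the dense math register (DMR) intrinsics it declares at the bottom: a <1024 x i1> value models one 1024-bit DMR, and llvm.ppc.mma.dmmr copies it. A minimal sketch of that copy in isolation, using only the types and the dmmr signature declared in the patch (the function name is illustrative):

  ; Hedged sketch: round-trip a 1024-bit DMR value through dmmr.
  define void @dmr_roundtrip(ptr %src, ptr %dst) {
  entry:
    %v = load <1024 x i1>, ptr %src, align 128
    %c = tail call <1024 x i1> @llvm.ppc.mma.dmmr(<1024 x i1> %v)
    store <1024 x i1> %c, ptr %dst, align 128
    ret void
  }
  declare <1024 x i1> @llvm.ppc.mma.dmmr(<1024 x i1>)

As the CHECK lines in @foo show, it is the memory accesses that produce the lxvp/dmxxinstdmr512 assemble and dmxxextfdmr512/stxvp extract sequences around the dmmr itself.
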
diff --git a/llvm/test/CodeGen/PowerPC/dmr-enable.ll b/llvm/test/CodeGen/PowerPC/dmr-enable.ll
index 1e30144..a505ac4 100644
--- a/llvm/test/CodeGen/PowerPC/dmr-enable.ll
+++ b/llvm/test/CodeGen/PowerPC/dmr-enable.ll
@@ -367,6 +367,69 @@ entry:
ret void
}
+define void @tbuild(ptr %p1, ptr %p2, ptr %res1, ptr %res2, ptr %v) {
+; CHECK-LABEL: tbuild:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv v3, 0(r7)
+; CHECK-NEXT: vmr v2, v3
+; CHECK-NEXT: dmxxinstdmr512 wacc_hi0, vsp34, vsp34, 1
+; CHECK-NEXT: dmxxinstdmr512 wacc0, vsp34, vsp34, 0
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-NEXT: stxvp vsp34, 96(r6)
+; CHECK-NEXT: stxvp vsp36, 64(r6)
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-NEXT: stxvp vsp34, 32(r6)
+; CHECK-NEXT: stxvp vsp36, 0(r6)
+; CHECK-NEXT: lxvp vsp34, 0(r3)
+; CHECK-NEXT: lxvp vsp36, 32(r3)
+; CHECK-NEXT: dmxxinstdmr512 wacc_hi0, vsp36, vsp34, 1
+; CHECK-NEXT: lxvp vsp34, 64(r3)
+; CHECK-NEXT: lxvp vsp36, 96(r3)
+; CHECK-NEXT: dmxxinstdmr512 wacc0, vsp36, vsp34, 0
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-NEXT: stxvp vsp34, 96(r5)
+; CHECK-NEXT: stxvp vsp36, 64(r5)
+; CHECK-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-NEXT: stxvp vsp34, 32(r5)
+; CHECK-NEXT: stxvp vsp36, 0(r5)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: tbuild:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv v3, 0(r7)
+; CHECK-BE-NEXT: vmr v2, v3
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc_hi0, vsp34, vsp34, 1
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc0, vsp34, vsp34, 0
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-BE-NEXT: stxvp vsp36, 96(r6)
+; CHECK-BE-NEXT: stxvp vsp34, 64(r6)
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-BE-NEXT: stxvp vsp36, 32(r6)
+; CHECK-BE-NEXT: stxvp vsp34, 0(r6)
+; CHECK-BE-NEXT: lxvp vsp34, 96(r3)
+; CHECK-BE-NEXT: lxvp vsp36, 64(r3)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc_hi0, vsp36, vsp34, 1
+; CHECK-BE-NEXT: lxvp vsp34, 32(r3)
+; CHECK-BE-NEXT: lxvp vsp36, 0(r3)
+; CHECK-BE-NEXT: dmxxinstdmr512 wacc0, vsp36, vsp34, 0
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc_hi0, 1
+; CHECK-BE-NEXT: stxvp vsp36, 96(r5)
+; CHECK-BE-NEXT: stxvp vsp34, 64(r5)
+; CHECK-BE-NEXT: dmxxextfdmr512 vsp34, vsp36, wacc0, 0
+; CHECK-BE-NEXT: stxvp vsp36, 32(r5)
+; CHECK-BE-NEXT: stxvp vsp34, 0(r5)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = load <16 x i8>, ptr %v, align 16
+ %1 = tail call <1024 x i1> @llvm.ppc.mma.build.dmr(<16 x i8> %0, <16 x i8> %0, <16 x i8> %0, <16 x i8> %0, <16 x i8> %0, <16 x i8> %0, <16 x i8> %0, <16 x i8> %0)
+ store <1024 x i1> %1, ptr %res2, align 128
+ %2 = load <1024 x i1>, ptr %p1, align 128
+ tail call void @llvm.ppc.mma.disassemble.dmr(ptr %res1, <1024 x i1> %2)
+ ret void
+}
+
+declare <1024 x i1> @llvm.ppc.mma.build.dmr(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
+declare void @llvm.ppc.mma.disassemble.dmr(ptr, <1024 x i1>)
declare <1024 x i1> @llvm.ppc.mma.dmsetdmrz()
declare <1024 x i1> @llvm.ppc.mma.dmmr(<1024 x i1>)
declare <1024 x i1> @llvm.ppc.mma.dmxor(<1024 x i1>, <1024 x i1>)
diff --git a/llvm/test/CodeGen/PowerPC/fp_to_uint_endian.ll b/llvm/test/CodeGen/PowerPC/fp_to_uint_endian.ll
new file mode 100644
index 0000000..f8f0611
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/fp_to_uint_endian.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -mcpu=440 -mtriple=ppc32le-unknown-unknown | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
+; RUN: llc < %s -mcpu=440 -mtriple=ppc32-unknown-unknown | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
+
+define i32 @foo(double %a) {
+; CHECK-LABEL: foo:
+; CHECK-DAG: fctiwz [[FPR_1_i:[0-9]+]], {{[0-9]+}}
+; CHECK-DAG: stfd [[FPR_1_i]], [[#%u,VAL1_ADDR:]](1)
+; CHECK-LE-DAG: lwz {{[0-9]+}}, [[#%u,== VAL1_ADDR]](1)
+; CHECK-BE-DAG: lwz {{[0-9]+}}, [[#%u,== VAL1_ADDR + 4]](1)
+; CHECK-DAG: fctiwz [[FPR_2:[0-9]+]], {{[0-9]+}}
+; CHECK-DAG: stfd [[FPR_2]], [[#%u,VAL2_ADDR:]](1)
+; CHECK-LE-DAG: lwz {{[0-9]+}}, [[#%u,== VAL2_ADDR]](1)
+; CHECK-BE-DAG: lwz {{[0-9]+}}, [[#%u,== VAL2_ADDR + 4]](1)
+entry:
+ %tmp.1 = fptoui double %a to i32 ; <i32> [#uses=1]
+ ret i32 %tmp.1
+}
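
The two fctiwz/stfd/lwz groups checked above come from the usual fptoui expansion on 32-bit PowerPC, which has no unsigned convert instruction: convert once directly and once after subtracting 2^31, then select on a compare. fctiwz leaves the 32-bit result in the low word of an 8-byte FPR image, so after stfd that word sits at offset 0 on little-endian and offset 4 on big-endian, which is exactly what the CHECK-LE/CHECK-BE offsets assert. A rough IR sketch of the expansion (not part of the test; 0x41E0000000000000 is 2^31 as a double):

  define i32 @foo_expanded(double %a) {
  entry:
    %small = fcmp olt double %a, 0x41E0000000000000
    %adj   = fsub double %a, 0x41E0000000000000
    %lo    = fptosi double %a to i32
    %hi0   = fptosi double %adj to i32
    %hi    = xor i32 %hi0, -2147483648   ; add back the 2^31 bias
    %r     = select i1 %small, i32 %lo, i32 %hi
    ret i32 %r
  }
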
diff --git a/llvm/test/CodeGen/PowerPC/livevars-crash2.mir b/llvm/test/CodeGen/PowerPC/livevars-crash2.mir
index deaae39..c7a98f8 100644
--- a/llvm/test/CodeGen/PowerPC/livevars-crash2.mir
+++ b/llvm/test/CodeGen/PowerPC/livevars-crash2.mir
@@ -136,8 +136,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/PowerPC/llrint-conv.ll b/llvm/test/CodeGen/PowerPC/llrint-conv.ll
index daadf85..8e49ddc 100644
--- a/llvm/test/CodeGen/PowerPC/llrint-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/llrint-conv.ll
@@ -1,10 +1,25 @@
; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc | FileCheck %s
+
+; FIXME: crash "Input type needs to be promoted!"
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.i64.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.i64.f16(half %x)
+; ret i64 %0
+; }
; CHECK-LABEL: testmsws:
; CHECK: bl llrintf
define signext i32 @testmsws(float %x) {
entry:
- %0 = tail call i64 @llvm.llrint.f32(float %x)
+ %0 = tail call i64 @llvm.llrint.i64.f32(float %x)
%conv = trunc i64 %0 to i32
ret i32 %conv
}
@@ -13,7 +28,7 @@ entry:
; CHECK: bl llrintf
define i64 @testmsxs(float %x) {
entry:
- %0 = tail call i64 @llvm.llrint.f32(float %x)
+ %0 = tail call i64 @llvm.llrint.i64.f32(float %x)
ret i64 %0
}
@@ -21,7 +36,7 @@ entry:
; CHECK: bl llrint
define signext i32 @testmswd(double %x) {
entry:
- %0 = tail call i64 @llvm.llrint.f64(double %x)
+ %0 = tail call i64 @llvm.llrint.i64.f64(double %x)
%conv = trunc i64 %0 to i32
ret i32 %conv
}
@@ -30,7 +45,7 @@ entry:
; CHECK: bl llrint
define i64 @testmsxd(double %x) {
entry:
- %0 = tail call i64 @llvm.llrint.f64(double %x)
+ %0 = tail call i64 @llvm.llrint.i64.f64(double %x)
ret i64 %0
}
@@ -38,7 +53,7 @@ entry:
; CHECK: bl llrintl
define signext i32 @testmswl(ppc_fp128 %x) {
entry:
- %0 = tail call i64 @llvm.llrint.ppcf128(ppc_fp128 %x)
+ %0 = tail call i64 @llvm.llrint.i64.ppcf128(ppc_fp128 %x)
%conv = trunc i64 %0 to i32
ret i32 %conv
}
@@ -47,10 +62,27 @@ entry:
; CHECK: bl llrintl
define i64 @testmsll(ppc_fp128 %x) {
entry:
- %0 = tail call i64 @llvm.llrint.ppcf128(ppc_fp128 %x)
+ %0 = tail call i64 @llvm.llrint.i64.ppcf128(ppc_fp128 %x)
+ ret i64 %0
+}
+
+; CHECK-LABEL: testmswq:
+; CHECK: bl llrintf128
+define signext i32 @testmswq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x)
+ %conv = trunc i64 %0 to i32
+ ret i32 %conv
+}
+
+; CHECK-LABEL: testmslq:
+; CHECK: bl llrintf128
+define i64 @testmslq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x)
ret i64 %0
}
-declare i64 @llvm.llrint.f32(float) nounwind readnone
-declare i64 @llvm.llrint.f64(double) nounwind readnone
-declare i64 @llvm.llrint.ppcf128(ppc_fp128) nounwind readnone
+declare i64 @llvm.llrint.i64.f32(float) nounwind readnone
+declare i64 @llvm.llrint.i64.f64(double) nounwind readnone
+declare i64 @llvm.llrint.i64.ppcf128(ppc_fp128) nounwind readnone
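
The renames above follow LLVM's intrinsic name mangling: llvm.llrint is overloaded on both its result and operand types, so each overload spells both in its name, and the new fp128 cases lower to the llrintf128 libcall just as f32/f64/ppcf128 lower to llrintf/llrint/llrintl. A minimal sketch of the fp128 overload (it mirrors testmslq above):

  declare i64 @llvm.llrint.i64.f128(fp128)

  define i64 @llrint_q(fp128 %x) {
  entry:
    %r = tail call i64 @llvm.llrint.i64.f128(fp128 %x)
    ret i64 %r
  }
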
diff --git a/llvm/test/CodeGen/PowerPC/lower-scalar-mass-afn.ll b/llvm/test/CodeGen/PowerPC/lower-scalar-mass-afn.ll
index 018fbe9..f3085cc 100644
--- a/llvm/test/CodeGen/PowerPC/lower-scalar-mass-afn.ll
+++ b/llvm/test/CodeGen/PowerPC/lower-scalar-mass-afn.ll
@@ -81,7 +81,7 @@ declare double @__log10_finite (double);
declare double @__pow_finite (double, double);
declare double @__sinh_finite (double);
-define float @acosf_f32(float %a) #0 {
+define float @acosf_f32(float %a) {
; CHECK-LABEL: acosf_f32
; CHECK: __xl_acosf
; CHECK: blr
@@ -90,7 +90,7 @@ entry:
ret float %call
}
-define float @acoshf_f32(float %a) #0 {
+define float @acoshf_f32(float %a) {
; CHECK-LABEL: acoshf_f32
; CHECK: __xl_acoshf
; CHECK: blr
@@ -99,7 +99,7 @@ entry:
ret float %call
}
-define float @asinf_f32(float %a) #0 {
+define float @asinf_f32(float %a) {
; CHECK-LABEL: asinf_f32
; CHECK: __xl_asinf
; CHECK: blr
@@ -108,7 +108,7 @@ entry:
ret float %call
}
-define float @asinhf_f32(float %a) #0 {
+define float @asinhf_f32(float %a) {
; CHECK-LABEL: asinhf_f32
; CHECK: __xl_asinhf
; CHECK: blr
@@ -117,7 +117,7 @@ entry:
ret float %call
}
-define float @atan2f_f32(float %a, float %b) #0 {
+define float @atan2f_f32(float %a, float %b) {
; CHECK-LABEL: atan2f_f32
; CHECK: __xl_atan2f
; CHECK: blr
@@ -126,7 +126,7 @@ entry:
ret float %call
}
-define float @atanf_f32(float %a) #0 {
+define float @atanf_f32(float %a) {
; CHECK-LABEL: atanf_f32
; CHECK: __xl_atanf
; CHECK: blr
@@ -135,7 +135,7 @@ entry:
ret float %call
}
-define float @atanhf_f32(float %a) #0 {
+define float @atanhf_f32(float %a) {
; CHECK-LABEL: atanhf_f32
; CHECK: __xl_atanhf
; CHECK: blr
@@ -144,7 +144,7 @@ entry:
ret float %call
}
-define float @cbrtf_f32(float %a) #0 {
+define float @cbrtf_f32(float %a) {
; CHECK-LABEL: cbrtf_f32
; CHECK: __xl_cbrtf
; CHECK: blr
@@ -153,7 +153,7 @@ entry:
ret float %call
}
-define float @copysignf_f32(float %a, float %b) #0 {
+define float @copysignf_f32(float %a, float %b) {
; CHECK-LABEL: copysignf_f32
; CHECK: copysignf
; CHECK: blr
@@ -162,7 +162,7 @@ entry:
ret float %call
}
-define float @cosf_f32(float %a) #0 {
+define float @cosf_f32(float %a) {
; CHECK-LABEL: cosf_f32
; CHECK: __xl_cosf
; CHECK: blr
@@ -171,7 +171,7 @@ entry:
ret float %call
}
-define float @coshf_f32(float %a) #0 {
+define float @coshf_f32(float %a) {
; CHECK-LABEL: coshf_f32
; CHECK: __xl_coshf
; CHECK: blr
@@ -180,7 +180,7 @@ entry:
ret float %call
}
-define float @erfcf_f32(float %a) #0 {
+define float @erfcf_f32(float %a) {
; CHECK-LABEL: erfcf_f32
; CHECK: __xl_erfcf
; CHECK: blr
@@ -189,7 +189,7 @@ entry:
ret float %call
}
-define float @erff_f32(float %a) #0 {
+define float @erff_f32(float %a) {
; CHECK-LABEL: erff_f32
; CHECK: __xl_erff
; CHECK: blr
@@ -198,7 +198,7 @@ entry:
ret float %call
}
-define float @expf_f32(float %a) #0 {
+define float @expf_f32(float %a) {
; CHECK-LABEL: expf_f32
; CHECK: __xl_expf
; CHECK: blr
@@ -207,7 +207,7 @@ entry:
ret float %call
}
-define float @expm1f_f32(float %a) #0 {
+define float @expm1f_f32(float %a) {
; CHECK-LABEL: expm1f_f32
; CHECK: __xl_expm1f
; CHECK: blr
@@ -216,7 +216,7 @@ entry:
ret float %call
}
-define float @hypotf_f32(float %a, float %b) #0 {
+define float @hypotf_f32(float %a, float %b) {
; CHECK-LABEL: hypotf_f32
; CHECK: __xl_hypotf
; CHECK: blr
@@ -225,7 +225,7 @@ entry:
ret float %call
}
-define float @lgammaf_f32(float %a) #0 {
+define float @lgammaf_f32(float %a) {
; CHECK-LABEL: lgammaf_f32
; CHECK: __xl_lgammaf
; CHECK: blr
@@ -234,7 +234,7 @@ entry:
ret float %call
}
-define float @log10f_f32(float %a) #0 {
+define float @log10f_f32(float %a) {
; CHECK-LABEL: log10f_f32
; CHECK: __xl_log10f
; CHECK: blr
@@ -243,7 +243,7 @@ entry:
ret float %call
}
-define float @log1pf_f32(float %a) #0 {
+define float @log1pf_f32(float %a) {
; CHECK-LABEL: log1pf_f32
; CHECK: __xl_log1pf
; CHECK: blr
@@ -252,7 +252,7 @@ entry:
ret float %call
}
-define float @logf_f32(float %a) #0 {
+define float @logf_f32(float %a) {
; CHECK-LABEL: logf_f32
; CHECK: __xl_logf
; CHECK: blr
@@ -261,7 +261,7 @@ entry:
ret float %call
}
-define float @powf_f32(float %a, float %b) #0 {
+define float @powf_f32(float %a, float %b) {
; CHECK-LABEL: powf_f32
; CHECK: __xl_powf
; CHECK: blr
@@ -270,7 +270,7 @@ entry:
ret float %call
}
-define float @rintf_f32(float %a) #0 {
+define float @rintf_f32(float %a) {
; CHECK-LABEL: rintf_f32
; CHECK-NOT: __xl_rintf
; CHECK: blr
@@ -279,7 +279,7 @@ entry:
ret float %call
}
-define float @sinf_f32(float %a) #0 {
+define float @sinf_f32(float %a) {
; CHECK-LABEL: sinf_f32
; CHECK: __xl_sinf
; CHECK: blr
@@ -288,7 +288,7 @@ entry:
ret float %call
}
-define float @sinhf_f32(float %a) #0 {
+define float @sinhf_f32(float %a) {
; CHECK-LABEL: sinhf_f32
; CHECK: __xl_sinhf
; CHECK: blr
@@ -297,7 +297,7 @@ entry:
ret float %call
}
-define float @tanf_f32(float %a) #0 {
+define float @tanf_f32(float %a) {
; CHECK-LABEL: tanf_f32
; CHECK: __xl_tanf
; CHECK: blr
@@ -306,7 +306,7 @@ entry:
ret float %call
}
-define float @tanhf_f32(float %a) #0 {
+define float @tanhf_f32(float %a) {
; CHECK-LABEL: tanhf_f32
; CHECK: __xl_tanhf
; CHECK: blr
@@ -315,7 +315,7 @@ entry:
ret float %call
}
-define double @acos_f64(double %a) #0 {
+define double @acos_f64(double %a) {
; CHECK-LABEL: acos_f64
; CHECK: __xl_acos
; CHECK: blr
@@ -324,7 +324,7 @@ entry:
ret double %call
}
-define double @acosh_f64(double %a) #0 {
+define double @acosh_f64(double %a) {
; CHECK-LABEL: acosh_f64
; CHECK: __xl_acosh
; CHECK: blr
@@ -333,7 +333,7 @@ entry:
ret double %call
}
-define double @anint_f64(double %a) #0 {
+define double @anint_f64(double %a) {
; CHECK-LABEL: anint_f64
; CHECK-NOT: __xl_anint
; CHECK: blr
@@ -342,7 +342,7 @@ entry:
ret double %call
}
-define double @asin_f64(double %a) #0 {
+define double @asin_f64(double %a) {
; CHECK-LABEL: asin_f64
; CHECK: __xl_asin
; CHECK: blr
@@ -351,7 +351,7 @@ entry:
ret double %call
}
-define double @asinh_f64(double %a) #0 {
+define double @asinh_f64(double %a) {
; CHECK-LABEL: asinh_f64
; CHECK: __xl_asinh
; CHECK: blr
@@ -360,7 +360,7 @@ entry:
ret double %call
}
-define double @atan_f64(double %a) #0 {
+define double @atan_f64(double %a) {
; CHECK-LABEL: atan_f64
; CHECK: __xl_atan
; CHECK: blr
@@ -369,7 +369,7 @@ entry:
ret double %call
}
-define double @atan2_f64(double %a, double %b) #0 {
+define double @atan2_f64(double %a, double %b) {
; CHECK-LABEL: atan2_f64
; CHECK: __xl_atan2
; CHECK: blr
@@ -378,7 +378,7 @@ entry:
ret double %call
}
-define double @atanh_f64(double %a) #0 {
+define double @atanh_f64(double %a) {
; CHECK-LABEL: atanh_f64
; CHECK: __xl_atanh
; CHECK: blr
@@ -387,7 +387,7 @@ entry:
ret double %call
}
-define double @cbrt_f64(double %a) #0 {
+define double @cbrt_f64(double %a) {
; CHECK-LABEL: cbrt_f64
; CHECK: __xl_cbrt
; CHECK: blr
@@ -396,7 +396,7 @@ entry:
ret double %call
}
-define double @copysign_f64(double %a, double %b) #0 {
+define double @copysign_f64(double %a, double %b) {
; CHECK-LABEL: copysign_f64
; CHECK: copysign
; CHECK: blr
@@ -405,7 +405,7 @@ entry:
ret double %call
}
-define double @cos_f64(double %a) #0 {
+define double @cos_f64(double %a) {
; CHECK-LABEL: cos_f64
; CHECK: __xl_cos
; CHECK: blr
@@ -414,7 +414,7 @@ entry:
ret double %call
}
-define double @cosh_f64(double %a) #0 {
+define double @cosh_f64(double %a) {
; CHECK-LABEL: cosh_f64
; CHECK: __xl_cosh
; CHECK: blr
@@ -423,7 +423,7 @@ entry:
ret double %call
}
-define double @cosisin_f64(double %a) #0 {
+define double @cosisin_f64(double %a) {
; CHECK-LABEL: cosisin_f64
; CHECK-NOT: __xl_cosisin
; CHECK: blr
@@ -432,7 +432,7 @@ entry:
ret double %call
}
-define double @dnint_f64(double %a) #0 {
+define double @dnint_f64(double %a) {
; CHECK-LABEL: dnint_f64
; CHECK-NOT: __xl_dnint
; CHECK: blr
@@ -441,7 +441,7 @@ entry:
ret double %call
}
-define double @erf_f64(double %a) #0 {
+define double @erf_f64(double %a) {
; CHECK-LABEL: erf_f64
; CHECK: __xl_erf
; CHECK: blr
@@ -450,7 +450,7 @@ entry:
ret double %call
}
-define double @erfc_f64(double %a) #0 {
+define double @erfc_f64(double %a) {
; CHECK-LABEL: erfc_f64
; CHECK: __xl_erfc
; CHECK: blr
@@ -459,7 +459,7 @@ entry:
ret double %call
}
-define double @exp_f64(double %a) #0 {
+define double @exp_f64(double %a) {
; CHECK-LABEL: exp_f64
; CHECK: __xl_exp
; CHECK: blr
@@ -468,7 +468,7 @@ entry:
ret double %call
}
-define double @expm1_f64(double %a) #0 {
+define double @expm1_f64(double %a) {
; CHECK-LABEL: expm1_f64
; CHECK: __xl_expm1
; CHECK: blr
@@ -477,7 +477,7 @@ entry:
ret double %call
}
-define double @hypot_f64(double %a, double %b) #0 {
+define double @hypot_f64(double %a, double %b) {
; CHECK-LABEL: hypot_f64
; CHECK: __xl_hypot
; CHECK: blr
@@ -486,7 +486,7 @@ entry:
ret double %call
}
-define double @lgamma_f64(double %a) #0 {
+define double @lgamma_f64(double %a) {
; CHECK-LABEL: lgamma_f64
; CHECK: __xl_lgamma
; CHECK: blr
@@ -495,7 +495,7 @@ entry:
ret double %call
}
-define double @log_f64(double %a) #0 {
+define double @log_f64(double %a) {
; CHECK-LABEL: log_f64
; CHECK: __xl_log
; CHECK: blr
@@ -504,7 +504,7 @@ entry:
ret double %call
}
-define double @log10_f64(double %a) #0 {
+define double @log10_f64(double %a) {
; CHECK-LABEL: log10_f64
; CHECK: __xl_log10
; CHECK: blr
@@ -513,7 +513,7 @@ entry:
ret double %call
}
-define double @log1p_f64(double %a) #0 {
+define double @log1p_f64(double %a) {
; CHECK-LABEL: log1p_f64
; CHECK: __xl_log1p
; CHECK: blr
@@ -522,7 +522,7 @@ entry:
ret double %call
}
-define double @pow_f64(double %a, double %b) #0 {
+define double @pow_f64(double %a, double %b) {
; CHECK-LABEL: pow_f64
; CHECK: __xl_pow
; CHECK: blr
@@ -531,7 +531,7 @@ entry:
ret double %call
}
-define double @rsqrt_f64(double %a) #0 {
+define double @rsqrt_f64(double %a) {
; CHECK-LABEL: rsqrt_f64
; CHECK: __xl_rsqrt
; CHECK: blr
@@ -540,7 +540,7 @@ entry:
ret double %call
}
-define double @sin_f64(double %a) #0 {
+define double @sin_f64(double %a) {
; CHECK-LABEL: sin_f64
; CHECK: __xl_sin
; CHECK: blr
@@ -549,7 +549,7 @@ entry:
ret double %call
}
-define double @sincos_f64(double %a) #0 {
+define double @sincos_f64(double %a) {
; CHECK-LABEL: sincos_f64
; CHECK-NOT: __xl_sincos
; CHECK: blr
@@ -558,7 +558,7 @@ entry:
ret double %call
}
-define double @sinh_f64(double %a) #0 {
+define double @sinh_f64(double %a) {
; CHECK-LABEL: sinh_f64
; CHECK: __xl_sinh
; CHECK: blr
@@ -567,7 +567,7 @@ entry:
ret double %call
}
-define double @sqrt_f64(double %a) #0 {
+define double @sqrt_f64(double %a) {
; CHECK-LABEL: sqrt_f64
; CHECK: __xl_sqrt
; CHECK: blr
@@ -576,7 +576,7 @@ entry:
ret double %call
}
-define double @tan_f64(double %a) #0 {
+define double @tan_f64(double %a) {
; CHECK-LABEL: tan_f64
; CHECK: __xl_tan
; CHECK: blr
@@ -585,7 +585,7 @@ entry:
ret double %call
}
-define double @tanh_f64(double %a) #0 {
+define double @tanh_f64(double %a) {
; CHECK-LABEL: tanh_f64
; CHECK: __xl_tanh
; CHECK: blr
@@ -594,7 +594,7 @@ entry:
ret double %call
}
-define float @__acosf_finite_f32(float %a) #0 {
+define float @__acosf_finite_f32(float %a) {
; CHECK-LABEL: __acosf_finite_f32
; CHECK: __xl_acosf
; CHECK: blr
@@ -603,7 +603,7 @@ entry:
ret float %call
}
-define float @__acoshf_finite_f32(float %a) #0 {
+define float @__acoshf_finite_f32(float %a) {
; CHECK-LABEL: __acoshf_finite_f32
; CHECK: __xl_acoshf
; CHECK: blr
@@ -612,7 +612,7 @@ entry:
ret float %call
}
-define float @__asinf_finite_f32(float %a) #0 {
+define float @__asinf_finite_f32(float %a) {
; CHECK-LABEL: __asinf_finite_f32
; CHECK: __xl_asinf
; CHECK: blr
@@ -621,7 +621,7 @@ entry:
ret float %call
}
-define float @__atan2f_finite_f32(float %a, float %b) #0 {
+define float @__atan2f_finite_f32(float %a, float %b) {
; CHECK-LABEL: __atan2f_finite_f32
; CHECK: __xl_atan2f
; CHECK: blr
@@ -630,7 +630,7 @@ entry:
ret float %call
}
-define float @__atanhf_finite_f32(float %a) #0 {
+define float @__atanhf_finite_f32(float %a) {
; CHECK-LABEL: __atanhf_finite_f32
; CHECK: __xl_atanhf
; CHECK: blr
@@ -639,7 +639,7 @@ entry:
ret float %call
}
-define float @__coshf_finite_f32(float %a) #0 {
+define float @__coshf_finite_f32(float %a) {
; CHECK-LABEL: __coshf_finite_f32
; CHECK: __xl_coshf
; CHECK: blr
@@ -647,7 +647,7 @@ entry:
%call = tail call afn float @__coshf_finite(float %a)
ret float %call
}
-define float @__expf_finite_f32(float %a) #0 {
+define float @__expf_finite_f32(float %a) {
; CHECK-LABEL: __expf_finite_f32
; CHECK: __xl_expf
; CHECK: blr
@@ -655,7 +655,7 @@ entry:
%call = tail call afn float @__expf_finite(float %a)
ret float %call
}
-define float @__logf_finite_f32(float %a) #0 {
+define float @__logf_finite_f32(float %a) {
; CHECK-LABEL: __logf_finite_f32
; CHECK: __xl_logf
; CHECK: blr
@@ -663,7 +663,7 @@ entry:
%call = tail call afn float @__logf_finite(float %a)
ret float %call
}
-define float @__log10f_finite_f32(float %a) #0 {
+define float @__log10f_finite_f32(float %a) {
; CHECK-LABEL: __log10f_finite_f32
; CHECK: __xl_log10f
; CHECK: blr
@@ -671,7 +671,7 @@ entry:
%call = tail call afn float @__log10f_finite(float %a)
ret float %call
}
-define float @__powf_finite_f32(float %a, float %b) #0 {
+define float @__powf_finite_f32(float %a, float %b) {
; CHECK-LABEL: __powf_finite_f32
; CHECK: __xl_powf
; CHECK: blr
@@ -679,7 +679,7 @@ entry:
%call = tail call afn float @__powf_finite(float %a, float %b)
ret float %call
}
-define float @__sinhf_finite_f32(float %a) #0 {
+define float @__sinhf_finite_f32(float %a) {
; CHECK-LABEL: __sinhf_finite_f32
; CHECK: __xl_sinhf
; CHECK: blr
@@ -688,7 +688,7 @@ entry:
ret float %call
}
-define double @__acos_finite_f64(double %a) #0 {
+define double @__acos_finite_f64(double %a) {
; CHECK-LABEL: __acos_finite_f64
; CHECK: __xl_acos
; CHECK: blr
@@ -697,7 +697,7 @@ entry:
ret double %call
}
-define double @__acosh_finite_f64(double %a) #0 {
+define double @__acosh_finite_f64(double %a) {
; CHECK-LABEL: __acosh_finite_f64
; CHECK: __xl_acosh
; CHECK: blr
@@ -706,7 +706,7 @@ entry:
ret double %call
}
-define double @__asin_finite_f64(double %a) #0 {
+define double @__asin_finite_f64(double %a) {
; CHECK-LABEL: __asin_finite_f64
; CHECK: __xl_asin
; CHECK: blr
@@ -715,7 +715,7 @@ entry:
ret double %call
}
-define double @__atan2_finite_f64(double %a, double %b) #0 {
+define double @__atan2_finite_f64(double %a, double %b) {
; CHECK-LABEL: __atan2_finite_f64
; CHECK: __xl_atan2
; CHECK: blr
@@ -724,7 +724,7 @@ entry:
ret double %call
}
-define double @__atanh_finite_f64(double %a) #0 {
+define double @__atanh_finite_f64(double %a) {
; CHECK-LABEL: __atanh_finite_f64
; CHECK: __xl_atanh
; CHECK: blr
@@ -733,7 +733,7 @@ entry:
ret double %call
}
-define double @__cosh_finite_f64(double %a) #0 {
+define double @__cosh_finite_f64(double %a) {
; CHECK-LABEL: __cosh_finite_f64
; CHECK: __xl_cosh
; CHECK: blr
@@ -742,7 +742,7 @@ entry:
ret double %call
}
-define double @__exp_finite_f64(double %a) #0 {
+define double @__exp_finite_f64(double %a) {
; CHECK-LABEL: __exp_finite_f64
; CHECK: __xl_exp
; CHECK: blr
@@ -751,7 +751,7 @@ entry:
ret double %call
}
-define double @__log_finite_f64(double %a) #0 {
+define double @__log_finite_f64(double %a) {
; CHECK-LABEL: __log_finite_f64
; CHECK: __xl_log
; CHECK: blr
@@ -760,7 +760,7 @@ entry:
ret double %call
}
-define double @__log10_finite_f64(double %a) #0 {
+define double @__log10_finite_f64(double %a) {
; CHECK-LABEL: __log10_finite_f64
; CHECK: __xl_log10
; CHECK: blr
@@ -769,7 +769,7 @@ entry:
ret double %call
}
-define double @__pow_finite_f64(double %a, double %b) #0 {
+define double @__pow_finite_f64(double %a, double %b) {
; CHECK-LABEL: __pow_finite_f64
; CHECK: __xl_pow
; CHECK: blr
@@ -778,7 +778,7 @@ entry:
ret double %call
}
-define double @__sinh_finite_f64(double %a) #0 {
+define double @__sinh_finite_f64(double %a) {
; CHECK-LABEL: __sinh_finite_f64
; CHECK: __xl_sinh
; CHECK: blr
@@ -786,5 +786,3 @@ entry:
%call = tail call afn double @__sinh_finite(double %a)
ret double %call
}
-
-attributes #0 = { "approx-func-fp-math"="true" }
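
Dropping attribute #0 is safe here because every call in the file already carries the per-call afn fast-math flag; after this change it is that flag, together with the MASS-lowering options in the RUN lines (not shown in these hunks), that the __xl_* checks rely on, rather than the function-level "approx-func-fp-math" attribute. A stripped-down sketch of the pattern (declaration assumed; the test bodies have the same shape):

  define float @sinf_afn(float %a) {
  entry:
    %call = tail call afn float @sinf(float %a)   ; afn requests the approximate __xl_sinf lowering
    ret float %call
  }
  declare float @sinf(float)
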
diff --git a/llvm/test/CodeGen/PowerPC/lrint-conv.ll b/llvm/test/CodeGen/PowerPC/lrint-conv.ll
index adfc994..bc77a20 100644
--- a/llvm/test/CodeGen/PowerPC/lrint-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/lrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc | FileCheck %s
+
+; FIXME: crash "Input type needs to be promoted!"
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; ret i64 %0
+; }
; CHECK-LABEL: testmsws:
; CHECK: bl lrintf
@@ -51,6 +66,23 @@ entry:
ret i64 %0
}
+; CHECK-LABEL: testmswq:
+; CHECK: bl lrintf128
+define signext i32 @testmswq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ %conv = trunc i64 %0 to i32
+ ret i32 %conv
+}
+
+; CHECK-LABEL: testmslq:
+; CHECK: bl lrintf128
+define i64 @testmslq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.lrint.i64.f32(float) nounwind readnone
declare i64 @llvm.lrint.i64.f64(double) nounwind readnone
declare i64 @llvm.lrint.i64.ppcf128(ppc_fp128) nounwind readnone
diff --git a/llvm/test/CodeGen/PowerPC/memintr32.ll b/llvm/test/CodeGen/PowerPC/milicode32.ll
index c07a5af..a2af6d4 100644
--- a/llvm/test/CodeGen/PowerPC/memintr32.ll
+++ b/llvm/test/CodeGen/PowerPC/milicode32.ll
@@ -11,7 +11,7 @@ define i32 @memcmp_test(ptr nocapture noundef readonly %ptr1, ptr nocapture noun
; CHECK-AIX-32-P9-NEXT: mflr r0
; CHECK-AIX-32-P9-NEXT: stwu r1, -64(r1)
; CHECK-AIX-32-P9-NEXT: stw r0, 72(r1)
-; CHECK-AIX-32-P9-NEXT: bl .memcmp[PR]
+; CHECK-AIX-32-P9-NEXT: bl .___memcmp[PR]
; CHECK-AIX-32-P9-NEXT: nop
; CHECK-AIX-32-P9-NEXT: addi r1, r1, 64
; CHECK-AIX-32-P9-NEXT: lwz r0, 8(r1)
@@ -35,5 +35,37 @@ entry:
declare i32 @memcmp(ptr noundef captures(none), ptr noundef captures(none), i32 noundef) nounwind
+define i32 @strlen_test(ptr noundef %str) nounwind {
+; CHECK-AIX-32-P9-LABEL: strlen_test:
+; CHECK-AIX-32-P9: # %bb.0: # %entry
+; CHECK-AIX-32-P9-NEXT: mflr r0
+; CHECK-AIX-32-P9-NEXT: stwu r1, -64(r1)
+; CHECK-AIX-32-P9-NEXT: stw r0, 72(r1)
+; CHECK-AIX-32-P9-NEXT: stw r3, 60(r1)
+; CHECK-AIX-32-P9-NEXT: bl .strlen[PR]
+; CHECK-AIX-32-P9-NEXT: nop
+; CHECK-AIX-32-P9-NEXT: addi r1, r1, 64
+; CHECK-AIX-32-P9-NEXT: lwz r0, 8(r1)
+; CHECK-AIX-32-P9-NEXT: mtlr r0
+; CHECK-AIX-32-P9-NEXT: blr
+;
+; CHECK-LINUX32-P9-LABEL: strlen_test:
+; CHECK-LINUX32-P9: # %bb.0: # %entry
+; CHECK-LINUX32-P9-NEXT: mflr r0
+; CHECK-LINUX32-P9-NEXT: stwu r1, -16(r1)
+; CHECK-LINUX32-P9-NEXT: stw r0, 20(r1)
+; CHECK-LINUX32-P9-NEXT: stw r3, 12(r1)
+; CHECK-LINUX32-P9-NEXT: bl strlen
+; CHECK-LINUX32-P9-NEXT: lwz r0, 20(r1)
+; CHECK-LINUX32-P9-NEXT: addi r1, r1, 16
+; CHECK-LINUX32-P9-NEXT: mtlr r0
+; CHECK-LINUX32-P9-NEXT: blr
+entry:
+ %str.addr = alloca ptr, align 4
+ store ptr %str, ptr %str.addr, align 4
+ %0 = load ptr, ptr %str.addr, align 4
+ %call = call i32 @strlen(ptr noundef %0)
+ ret i32 %call
+}
-
+declare i32 @strlen(ptr noundef) nounwind
diff --git a/llvm/test/CodeGen/PowerPC/memintr64.ll b/llvm/test/CodeGen/PowerPC/milicode64.ll
index b3a6650..0f0585d9 100644
--- a/llvm/test/CodeGen/PowerPC/memintr64.ll
+++ b/llvm/test/CodeGen/PowerPC/milicode64.ll
@@ -39,7 +39,7 @@ define noundef i32 @_Z11memcmp_testPKvS0_m(ptr noundef readonly captures(none) %
; CHECK-AIX-64-P9-NEXT: mflr r0
; CHECK-AIX-64-P9-NEXT: stdu r1, -112(r1)
; CHECK-AIX-64-P9-NEXT: std r0, 128(r1)
-; CHECK-AIX-64-P9-NEXT: bl .memcmp[PR]
+; CHECK-AIX-64-P9-NEXT: bl .___memcmp64[PR]
; CHECK-AIX-64-P9-NEXT: nop
; CHECK-AIX-64-P9-NEXT: addi r1, r1, 112
; CHECK-AIX-64-P9-NEXT: ld r0, 16(r1)
@@ -52,4 +52,51 @@ entry:
declare i32 @memcmp(ptr noundef captures(none), ptr noundef captures(none), i64 noundef) nounwind
+define i64 @strlen_test(ptr noundef %str) nounwind {
+; CHECK-LE-P9-LABEL: strlen_test:
+; CHECK-LE-P9: # %bb.0: # %entry
+; CHECK-LE-P9-NEXT: mflr r0
+; CHECK-LE-P9-NEXT: stdu r1, -48(r1)
+; CHECK-LE-P9-NEXT: std r0, 64(r1)
+; CHECK-LE-P9-NEXT: std r3, 40(r1)
+; CHECK-LE-P9-NEXT: bl strlen
+; CHECK-LE-P9-NEXT: nop
+; CHECK-LE-P9-NEXT: addi r1, r1, 48
+; CHECK-LE-P9-NEXT: ld r0, 16(r1)
+; CHECK-LE-P9-NEXT: mtlr r0
+; CHECK-LE-P9-NEXT: blr
+;
+; CHECK-BE-P9-LABEL: strlen_test:
+; CHECK-BE-P9: # %bb.0: # %entry
+; CHECK-BE-P9-NEXT: mflr r0
+; CHECK-BE-P9-NEXT: stdu r1, -128(r1)
+; CHECK-BE-P9-NEXT: std r0, 144(r1)
+; CHECK-BE-P9-NEXT: std r3, 120(r1)
+; CHECK-BE-P9-NEXT: bl strlen
+; CHECK-BE-P9-NEXT: nop
+; CHECK-BE-P9-NEXT: addi r1, r1, 128
+; CHECK-BE-P9-NEXT: ld r0, 16(r1)
+; CHECK-BE-P9-NEXT: mtlr r0
+; CHECK-BE-P9-NEXT: blr
+;
+; CHECK-AIX-64-P9-LABEL: strlen_test:
+; CHECK-AIX-64-P9: # %bb.0: # %entry
+; CHECK-AIX-64-P9-NEXT: mflr r0
+; CHECK-AIX-64-P9-NEXT: stdu r1, -128(r1)
+; CHECK-AIX-64-P9-NEXT: std r0, 144(r1)
+; CHECK-AIX-64-P9-NEXT: std r3, 120(r1)
+; CHECK-AIX-64-P9-NEXT: bl .strlen[PR]
+; CHECK-AIX-64-P9-NEXT: nop
+; CHECK-AIX-64-P9-NEXT: addi r1, r1, 128
+; CHECK-AIX-64-P9-NEXT: ld r0, 16(r1)
+; CHECK-AIX-64-P9-NEXT: mtlr r0
+; CHECK-AIX-64-P9-NEXT: blr
+entry:
+ %str.addr = alloca ptr, align 8
+ store ptr %str, ptr %str.addr, align 8
+ %0 = load ptr, ptr %str.addr, align 8
+ %call = call i64 @strlen(ptr noundef %0)
+ ret i64 %call
+}
+declare i64 @strlen(ptr noundef) nounwind
diff --git a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll
index 232014d..a9503f7 100644
--- a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll
+++ b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll
@@ -2,22 +2,87 @@
; Verify whether the generated assembly for the following function includes the mtvsrbmi instruction.
; vector unsigned char v00FF()
; {
-; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 };
-; return x;
+; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 };
+; return x;
+; }
+; vector unsigned short short00FF()
+; {
+; vector unsigned short x = { 0xFF, 0,0,0, 0,0,0,0};
+; return x;
+; }
+; vector unsigned int int00FF()
+; {
+; vector unsigned int x = { 0xFF, 0,0,0};
+; return x;
+; }
+; vector unsigned long long longlong00FF()
+; {
+; vector unsigned long long x = { 0xFF, 0};
+; return x;
; }
; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc-ibm-aix -mcpu=pwr10 -verify-machineinstrs \
-; RUN: | FileCheck %s --check-prefix=CHECK
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-BE
+
+; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr10 -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-LE
+
+; CHECK-NOT: .byte 255
+; CHECK-NOT: .byte 0
define dso_local noundef range(i8 -1, 1) <16 x i8> @_Z5v00FFv() {
-; CHECK-NOT: L..CPI0_0:
-; CHECK-NOT: .byte 255 # 0xff
-; CHECK-NOT: .byte 0 # 0x0
-
-; CHECK-LABEL: _Z5v00FFv:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: mtvsrbmi v2, 1
-; CHECK-NEXT: blr
+; CHECK-BE-LABEL: _Z5v00FFv:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: mtvsrbmi v2, 32768
+; CHECK-BE-NEXT: blr
+;
+; CHECK-LE-LABEL: _Z5v00FFv:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: mtvsrbmi v2, 1
+; CHECK-LE-NEXT: blr
+
entry:
ret <16 x i8> <i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
}
+
+define dso_local noundef range(i16 0, 256) <8 x i16> @_Z9short00FFv() {
+; CHECK-BE-LABEL: _Z9short00FFv:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: mtvsrbmi v2, 16384
+; CHECK-BE-NEXT: blr
+;
+; CHECK-LE-LABEL: _Z9short00FFv:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: mtvsrbmi v2, 1
+; CHECK-LE-NEXT: blr
+entry:
+ ret <8 x i16> <i16 255, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
+}
+
+define dso_local noundef range(i32 0, 256) <4 x i32> @_Z7int00FFv() {
+; CHECK-BE-LABEL: _Z7int00FFv:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: mtvsrbmi v2, 4096
+; CHECK-BE-NEXT: blr
+;
+; CHECK-LE-LABEL: _Z7int00FFv:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: mtvsrbmi v2, 1
+; CHECK-LE-NEXT: blr
+entry:
+ ret <4 x i32> <i32 255, i32 0, i32 0, i32 0>
+}
+
+define dso_local noundef range(i64 0, 256) <2 x i64> @_Z12longlong00FFv() {
+; CHECK-BE-LABEL: _Z12longlong00FFv:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: mtvsrbmi v2, 256
+; CHECK-BE-NEXT: blr
+;
+; CHECK-LE-LABEL: _Z12longlong00FFv:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: mtvsrbmi v2, 1
+; CHECK-LE-NEXT: blr
+entry:
+ ret <2 x i64> <i64 255, i64 0>
+}
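
The immediates differ between the two triples because each bit of the mtvsrbmi 16-bit mask selects one vector byte to set to 0xFF, and byte numbering follows the target endianness: reading the CHECK lines, little-endian uses bit b for byte b (the 0xFF byte is always byte 0, hence immediate 1), while big-endian uses bit 15-b. Worked from the constants above:

  element type   0xFF byte (BE layout)   BE immediate     LE immediate
  v16i8          byte 0                  2^15 = 32768     2^0 = 1
  v8i16          byte 1                  2^14 = 16384     2^0 = 1
  v4i32          byte 3                  2^12 = 4096      2^0 = 1
  v2i64          byte 7                  2^8  = 256       2^0 = 1
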
diff --git a/llvm/test/CodeGen/PowerPC/nofpclass.ll b/llvm/test/CodeGen/PowerPC/nofpclass.ll
new file mode 100644
index 0000000..b08e810
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/nofpclass.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-ibm-aix-xcoff < %s | FileCheck %s
+
+; TODO: Update this test after adding the proper expansion of nofpclass for
+; ppc_fp128 to test with more masks and to demonstrate preserving nofpclass
+; after legalization.
+
+define ppc_fp128 @f(ppc_fp128 nofpclass(nan) %s) {
+; CHECK-LABEL: f:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: blr
+entry:
+ ret ppc_fp128 %s
+}
diff --git a/llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll b/llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll
index c733a01..4b03278 100644
--- a/llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-spill-crlt.ll
@@ -30,16 +30,14 @@ define dso_local void @P10_Spill_CR_LT() local_unnamed_addr {
; CHECK-NEXT: mflr r0
; CHECK-NEXT: std r0, 16(r1)
; CHECK-NEXT: stw r12, 8(r1)
-; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: stdu r1, -48(r1)
+; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r29, -24
; CHECK-NEXT: .cfi_offset r30, -16
; CHECK-NEXT: .cfi_offset cr2, 8
; CHECK-NEXT: .cfi_offset cr3, 8
; CHECK-NEXT: .cfi_offset cr4, 8
-; CHECK-NEXT: std r29, 40(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r30, 48(r1) # 8-byte Folded Spill
+; CHECK-NEXT: std r30, 32(r1) # 8-byte Folded Spill
; CHECK-NEXT: bl call_2@notoc
; CHECK-NEXT: bc 12, 4*cr5+lt, .LBB0_13
; CHECK-NEXT: # %bb.1: # %bb
@@ -67,11 +65,10 @@ define dso_local void @P10_Spill_CR_LT() local_unnamed_addr {
; CHECK-NEXT: bc 12, 4*cr3+eq, .LBB0_11
; CHECK-NEXT: # %bb.6: # %bb32
; CHECK-NEXT: #
+; CHECK-NEXT: rlwinm r30, r30, 0, 24, 22
; CHECK-NEXT: andi. r3, r30, 2
-; CHECK-NEXT: rlwinm r29, r30, 0, 24, 22
; CHECK-NEXT: mcrf cr2, cr0
; CHECK-NEXT: bl call_4@notoc
-; CHECK-NEXT: mr r30, r29
; CHECK-NEXT: beq+ cr2, .LBB0_3
; CHECK-NEXT: # %bb.7: # %bb37
; CHECK-NEXT: .LBB0_8: # %bb22
@@ -92,13 +89,11 @@ define dso_local void @P10_Spill_CR_LT() local_unnamed_addr {
; CHECK-BE-NEXT: stdu r1, -144(r1)
; CHECK-BE-NEXT: .cfi_def_cfa_offset 144
; CHECK-BE-NEXT: .cfi_offset lr, 16
-; CHECK-BE-NEXT: .cfi_offset r28, -32
; CHECK-BE-NEXT: .cfi_offset r29, -24
; CHECK-BE-NEXT: .cfi_offset r30, -16
; CHECK-BE-NEXT: .cfi_offset cr2, 8
; CHECK-BE-NEXT: .cfi_offset cr2, 8
; CHECK-BE-NEXT: .cfi_offset cr2, 8
-; CHECK-BE-NEXT: std r28, 112(r1) # 8-byte Folded Spill
; CHECK-BE-NEXT: std r29, 120(r1) # 8-byte Folded Spill
; CHECK-BE-NEXT: std r30, 128(r1) # 8-byte Folded Spill
; CHECK-BE-NEXT: bl call_2
@@ -131,12 +126,11 @@ define dso_local void @P10_Spill_CR_LT() local_unnamed_addr {
; CHECK-BE-NEXT: bc 12, 4*cr3+eq, .LBB0_11
; CHECK-BE-NEXT: # %bb.6: # %bb32
; CHECK-BE-NEXT: #
+; CHECK-BE-NEXT: rlwinm r29, r29, 0, 24, 22
; CHECK-BE-NEXT: andi. r3, r29, 2
-; CHECK-BE-NEXT: rlwinm r28, r29, 0, 24, 22
; CHECK-BE-NEXT: mcrf cr2, cr0
; CHECK-BE-NEXT: bl call_4
; CHECK-BE-NEXT: nop
-; CHECK-BE-NEXT: mr r29, r28
; CHECK-BE-NEXT: beq+ cr2, .LBB0_3
; CHECK-BE-NEXT: # %bb.7: # %bb37
; CHECK-BE-NEXT: .LBB0_8: # %bb22
diff --git a/llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir b/llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir
index f615fcf..4333473 100644
--- a/llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir
+++ b/llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir
@@ -223,8 +223,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
@@ -337,8 +337,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
@@ -485,8 +485,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
@@ -696,8 +696,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir b/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
index 1d898a4..ceecdc5 100644
--- a/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
+++ b/llvm/test/CodeGen/PowerPC/peephole-replaceInstr-after-eliminate-extsw.mir
@@ -447,8 +447,8 @@ frameInfo:
hasMustTailInVarArgFunc: false
hasTailCall: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
entry_values: []
diff --git a/llvm/test/CodeGen/PowerPC/phi-eliminate.mir b/llvm/test/CodeGen/PowerPC/phi-eliminate.mir
index 72f7782..17060a8 100644
--- a/llvm/test/CodeGen/PowerPC/phi-eliminate.mir
+++ b/llvm/test/CodeGen/PowerPC/phi-eliminate.mir
@@ -122,8 +122,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-fast.ll b/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-fast.ll
index 58e228a..ff8c7ff 100644
--- a/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-fast.ll
+++ b/llvm/test/CodeGen/PowerPC/pow-025-075-intrinsic-scalar-mass-fast.ll
@@ -309,4 +309,4 @@ entry:
%call = tail call nnan ninf afn nsz double @llvm.pow.f64(double %a, double 5.000000e-01)
ret double %call
}
-attributes #1 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "approx-func-fp-math"="true" }
+attributes #1 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/pow-025-075-nointrinsic-scalar-mass-fast.ll b/llvm/test/CodeGen/PowerPC/pow-025-075-nointrinsic-scalar-mass-fast.ll
index c43bccc..3e0cdb0 100644
--- a/llvm/test/CodeGen/PowerPC/pow-025-075-nointrinsic-scalar-mass-fast.ll
+++ b/llvm/test/CodeGen/PowerPC/pow-025-075-nointrinsic-scalar-mass-fast.ll
@@ -453,4 +453,4 @@ entry:
ret double %call
}
-attributes #1 = { "approx-func-fp-math"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
+attributes #1 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
index b540948..eaab932 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
@@ -190,1000 +190,25 @@ entry:
ret <8 x i16> %6
}
-; FIXME: This does not produce ISD::ABS. This does not even vectorize correctly!
-; This function should look like sub_absv_32 and sub_absv_16 except that the type is v16i8.
-; Function Attrs: norecurse nounwind readnone
define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
-; CHECK-PWR9-LE-LABEL: sub_absv_8_ext:
-; CHECK-PWR9-LE: # %bb.0: # %entry
-; CHECK-PWR9-LE-NEXT: li r3, 0
-; CHECK-PWR9-LE-NEXT: li r5, 2
-; CHECK-PWR9-LE-NEXT: li r4, 1
-; CHECK-PWR9-LE-NEXT: std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-PWR9-LE-NEXT: vextubrx r6, r3, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r3, r3, v3
-; CHECK-PWR9-LE-NEXT: vextubrx r8, r5, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r5, r5, v3
-; CHECK-PWR9-LE-NEXT: std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-PWR9-LE-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-PWR9-LE-NEXT: std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-PWR9-LE-NEXT: std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-PWR9-LE-NEXT: std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-PWR9-LE-NEXT: clrlwi r6, r6, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r3, r3, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r5, r5, 24
-; CHECK-PWR9-LE-NEXT: vextubrx r7, r4, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r4, r4, v3
-; CHECK-PWR9-LE-NEXT: sub r3, r6, r3
-; CHECK-PWR9-LE-NEXT: sub r6, r8, r5
-; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r4, r4, 24
-; CHECK-PWR9-LE-NEXT: sub r4, r7, r4
-; CHECK-PWR9-LE-NEXT: srawi r5, r3, 31
-; CHECK-PWR9-LE-NEXT: srawi r7, r4, 31
-; CHECK-PWR9-LE-NEXT: xor r3, r3, r5
-; CHECK-PWR9-LE-NEXT: xor r4, r4, r7
-; CHECK-PWR9-LE-NEXT: sub r5, r3, r5
-; CHECK-PWR9-LE-NEXT: srawi r3, r6, 31
-; CHECK-PWR9-LE-NEXT: sub r4, r4, r7
-; CHECK-PWR9-LE-NEXT: xor r6, r6, r3
-; CHECK-PWR9-LE-NEXT: sub r3, r6, r3
-; CHECK-PWR9-LE-NEXT: li r6, 3
-; CHECK-PWR9-LE-NEXT: vextubrx r7, r6, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r6, r6, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r6, r6, 24
-; CHECK-PWR9-LE-NEXT: sub r6, r7, r6
-; CHECK-PWR9-LE-NEXT: srawi r7, r6, 31
-; CHECK-PWR9-LE-NEXT: xor r6, r6, r7
-; CHECK-PWR9-LE-NEXT: sub r6, r6, r7
-; CHECK-PWR9-LE-NEXT: li r7, 4
-; CHECK-PWR9-LE-NEXT: vextubrx r8, r7, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r7, r7, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v4, r6
-; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR9-LE-NEXT: sub r7, r8, r7
-; CHECK-PWR9-LE-NEXT: srawi r8, r7, 31
-; CHECK-PWR9-LE-NEXT: xor r7, r7, r8
-; CHECK-PWR9-LE-NEXT: sub r7, r7, r8
-; CHECK-PWR9-LE-NEXT: li r8, 5
-; CHECK-PWR9-LE-NEXT: vextubrx r9, r8, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r8, r8, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR9-LE-NEXT: sub r8, r9, r8
-; CHECK-PWR9-LE-NEXT: srawi r9, r8, 31
-; CHECK-PWR9-LE-NEXT: xor r8, r8, r9
-; CHECK-PWR9-LE-NEXT: sub r8, r8, r9
-; CHECK-PWR9-LE-NEXT: li r9, 6
-; CHECK-PWR9-LE-NEXT: vextubrx r10, r9, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r9, r9, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r10, r10, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR9-LE-NEXT: sub r9, r10, r9
-; CHECK-PWR9-LE-NEXT: srawi r10, r9, 31
-; CHECK-PWR9-LE-NEXT: xor r9, r9, r10
-; CHECK-PWR9-LE-NEXT: sub r9, r9, r10
-; CHECK-PWR9-LE-NEXT: li r10, 7
-; CHECK-PWR9-LE-NEXT: vextubrx r11, r10, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r10, r10, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r10, r10, 24
-; CHECK-PWR9-LE-NEXT: sub r10, r11, r10
-; CHECK-PWR9-LE-NEXT: srawi r11, r10, 31
-; CHECK-PWR9-LE-NEXT: xor r10, r10, r11
-; CHECK-PWR9-LE-NEXT: sub r10, r10, r11
-; CHECK-PWR9-LE-NEXT: li r11, 8
-; CHECK-PWR9-LE-NEXT: vextubrx r12, r11, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r11, r11, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v5, r10
-; CHECK-PWR9-LE-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR9-LE-NEXT: sub r11, r12, r11
-; CHECK-PWR9-LE-NEXT: srawi r12, r11, 31
-; CHECK-PWR9-LE-NEXT: xor r11, r11, r12
-; CHECK-PWR9-LE-NEXT: sub r11, r11, r12
-; CHECK-PWR9-LE-NEXT: li r12, 9
-; CHECK-PWR9-LE-NEXT: vextubrx r0, r12, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r12, r12, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR9-LE-NEXT: sub r12, r0, r12
-; CHECK-PWR9-LE-NEXT: srawi r0, r12, 31
-; CHECK-PWR9-LE-NEXT: xor r12, r12, r0
-; CHECK-PWR9-LE-NEXT: sub r12, r12, r0
-; CHECK-PWR9-LE-NEXT: li r0, 10
-; CHECK-PWR9-LE-NEXT: vextubrx r30, r0, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r0, r0, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR9-LE-NEXT: sub r0, r30, r0
-; CHECK-PWR9-LE-NEXT: srawi r30, r0, 31
-; CHECK-PWR9-LE-NEXT: xor r0, r0, r30
-; CHECK-PWR9-LE-NEXT: sub r0, r0, r30
-; CHECK-PWR9-LE-NEXT: li r30, 11
-; CHECK-PWR9-LE-NEXT: vextubrx r29, r30, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r30, r30, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR9-LE-NEXT: sub r30, r29, r30
-; CHECK-PWR9-LE-NEXT: srawi r29, r30, 31
-; CHECK-PWR9-LE-NEXT: xor r30, r30, r29
-; CHECK-PWR9-LE-NEXT: sub r30, r30, r29
-; CHECK-PWR9-LE-NEXT: li r29, 12
-; CHECK-PWR9-LE-NEXT: vextubrx r28, r29, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r29, r29, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR9-LE-NEXT: sub r29, r28, r29
-; CHECK-PWR9-LE-NEXT: srawi r28, r29, 31
-; CHECK-PWR9-LE-NEXT: xor r29, r29, r28
-; CHECK-PWR9-LE-NEXT: sub r29, r29, r28
-; CHECK-PWR9-LE-NEXT: li r28, 13
-; CHECK-PWR9-LE-NEXT: vextubrx r27, r28, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r28, r28, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR9-LE-NEXT: sub r28, r27, r28
-; CHECK-PWR9-LE-NEXT: srawi r27, r28, 31
-; CHECK-PWR9-LE-NEXT: xor r28, r28, r27
-; CHECK-PWR9-LE-NEXT: sub r28, r28, r27
-; CHECK-PWR9-LE-NEXT: li r27, 14
-; CHECK-PWR9-LE-NEXT: vextubrx r26, r27, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r27, r27, v3
-; CHECK-PWR9-LE-NEXT: clrlwi r26, r26, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR9-LE-NEXT: sub r27, r26, r27
-; CHECK-PWR9-LE-NEXT: srawi r26, r27, 31
-; CHECK-PWR9-LE-NEXT: xor r27, r27, r26
-; CHECK-PWR9-LE-NEXT: sub r27, r27, r26
-; CHECK-PWR9-LE-NEXT: li r26, 15
-; CHECK-PWR9-LE-NEXT: vextubrx r25, r26, v2
-; CHECK-PWR9-LE-NEXT: vextubrx r26, r26, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v2, r5
-; CHECK-PWR9-LE-NEXT: mtvsrd v3, r4
-; CHECK-PWR9-LE-NEXT: vmrghb v2, v3, v2
-; CHECK-PWR9-LE-NEXT: mtvsrd v3, r3
-; CHECK-PWR9-LE-NEXT: clrlwi r25, r25, 24
-; CHECK-PWR9-LE-NEXT: clrlwi r26, r26, 24
-; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v4, r8
-; CHECK-PWR9-LE-NEXT: sub r26, r25, r26
-; CHECK-PWR9-LE-NEXT: vmrglh v2, v3, v2
-; CHECK-PWR9-LE-NEXT: mtvsrd v3, r7
-; CHECK-PWR9-LE-NEXT: srawi r25, r26, 31
-; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v4, r9
-; CHECK-PWR9-LE-NEXT: xor r26, r26, r25
-; CHECK-PWR9-LE-NEXT: vmrghb v4, v5, v4
-; CHECK-PWR9-LE-NEXT: sub r26, r26, r25
-; CHECK-PWR9-LE-NEXT: ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-PWR9-LE-NEXT: mtvsrd v5, r26
-; CHECK-PWR9-LE-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-PWR9-LE-NEXT: vmrglh v3, v4, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v4, r30
-; CHECK-PWR9-LE-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-PWR9-LE-NEXT: xxmrglw vs0, v3, v2
-; CHECK-PWR9-LE-NEXT: mtvsrd v2, r11
-; CHECK-PWR9-LE-NEXT: mtvsrd v3, r12
-; CHECK-PWR9-LE-NEXT: vmrghb v2, v3, v2
-; CHECK-PWR9-LE-NEXT: mtvsrd v3, r0
-; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v4, r28
-; CHECK-PWR9-LE-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-PWR9-LE-NEXT: vmrglh v2, v3, v2
-; CHECK-PWR9-LE-NEXT: mtvsrd v3, r29
-; CHECK-PWR9-LE-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR9-LE-NEXT: mtvsrd v4, r27
-; CHECK-PWR9-LE-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-PWR9-LE-NEXT: vmrghb v4, v5, v4
-; CHECK-PWR9-LE-NEXT: vmrglh v3, v4, v3
-; CHECK-PWR9-LE-NEXT: xxmrglw vs1, v3, v2
-; CHECK-PWR9-LE-NEXT: xxmrgld v2, vs1, vs0
-; CHECK-PWR9-LE-NEXT: blr
-;
-; CHECK-PWR9-BE-LABEL: sub_absv_8_ext:
-; CHECK-PWR9-BE: # %bb.0: # %entry
-; CHECK-PWR9-BE-NEXT: li r3, 0
-; CHECK-PWR9-BE-NEXT: li r4, 1
-; CHECK-PWR9-BE-NEXT: li r5, 2
-; CHECK-PWR9-BE-NEXT: std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-PWR9-BE-NEXT: vextublx r6, r3, v2
-; CHECK-PWR9-BE-NEXT: vextublx r3, r3, v3
-; CHECK-PWR9-BE-NEXT: vextublx r7, r4, v2
-; CHECK-PWR9-BE-NEXT: vextublx r4, r4, v3
-; CHECK-PWR9-BE-NEXT: std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-PWR9-BE-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-PWR9-BE-NEXT: std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-PWR9-BE-NEXT: std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-PWR9-BE-NEXT: std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-PWR9-BE-NEXT: clrlwi r6, r6, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r3, r3, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r4, r4, 24
-; CHECK-PWR9-BE-NEXT: vextublx r8, r5, v2
-; CHECK-PWR9-BE-NEXT: vextublx r5, r5, v3
-; CHECK-PWR9-BE-NEXT: sub r3, r6, r3
-; CHECK-PWR9-BE-NEXT: sub r4, r7, r4
-; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r5, r5, 24
-; CHECK-PWR9-BE-NEXT: sub r5, r8, r5
-; CHECK-PWR9-BE-NEXT: srawi r6, r3, 31
-; CHECK-PWR9-BE-NEXT: srawi r7, r4, 31
-; CHECK-PWR9-BE-NEXT: srawi r8, r5, 31
-; CHECK-PWR9-BE-NEXT: xor r3, r3, r6
-; CHECK-PWR9-BE-NEXT: xor r4, r4, r7
-; CHECK-PWR9-BE-NEXT: xor r5, r5, r8
-; CHECK-PWR9-BE-NEXT: sub r3, r3, r6
-; CHECK-PWR9-BE-NEXT: li r6, 3
-; CHECK-PWR9-BE-NEXT: sub r4, r4, r7
-; CHECK-PWR9-BE-NEXT: sub r5, r5, r8
-; CHECK-PWR9-BE-NEXT: vextublx r7, r6, v2
-; CHECK-PWR9-BE-NEXT: vextublx r6, r6, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r6, r6, 24
-; CHECK-PWR9-BE-NEXT: sub r6, r7, r6
-; CHECK-PWR9-BE-NEXT: srawi r7, r6, 31
-; CHECK-PWR9-BE-NEXT: xor r6, r6, r7
-; CHECK-PWR9-BE-NEXT: sub r6, r6, r7
-; CHECK-PWR9-BE-NEXT: li r7, 4
-; CHECK-PWR9-BE-NEXT: vextublx r8, r7, v2
-; CHECK-PWR9-BE-NEXT: vextublx r7, r7, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR9-BE-NEXT: sub r7, r8, r7
-; CHECK-PWR9-BE-NEXT: srawi r8, r7, 31
-; CHECK-PWR9-BE-NEXT: xor r7, r7, r8
-; CHECK-PWR9-BE-NEXT: sub r7, r7, r8
-; CHECK-PWR9-BE-NEXT: li r8, 5
-; CHECK-PWR9-BE-NEXT: vextublx r9, r8, v2
-; CHECK-PWR9-BE-NEXT: vextublx r8, r8, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR9-BE-NEXT: sub r8, r9, r8
-; CHECK-PWR9-BE-NEXT: srawi r9, r8, 31
-; CHECK-PWR9-BE-NEXT: xor r8, r8, r9
-; CHECK-PWR9-BE-NEXT: sub r8, r8, r9
-; CHECK-PWR9-BE-NEXT: li r9, 6
-; CHECK-PWR9-BE-NEXT: vextublx r10, r9, v2
-; CHECK-PWR9-BE-NEXT: vextublx r9, r9, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r10, r10, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR9-BE-NEXT: sub r9, r10, r9
-; CHECK-PWR9-BE-NEXT: srawi r10, r9, 31
-; CHECK-PWR9-BE-NEXT: xor r9, r9, r10
-; CHECK-PWR9-BE-NEXT: sub r9, r9, r10
-; CHECK-PWR9-BE-NEXT: li r10, 7
-; CHECK-PWR9-BE-NEXT: vextublx r11, r10, v2
-; CHECK-PWR9-BE-NEXT: vextublx r10, r10, v3
-; CHECK-PWR9-BE-NEXT: mtfprwz f2, r9
-; CHECK-PWR9-BE-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r10, r10, 24
-; CHECK-PWR9-BE-NEXT: sub r10, r11, r10
-; CHECK-PWR9-BE-NEXT: srawi r11, r10, 31
-; CHECK-PWR9-BE-NEXT: xor r10, r10, r11
-; CHECK-PWR9-BE-NEXT: sub r10, r10, r11
-; CHECK-PWR9-BE-NEXT: li r11, 8
-; CHECK-PWR9-BE-NEXT: vextublx r12, r11, v2
-; CHECK-PWR9-BE-NEXT: vextublx r11, r11, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR9-BE-NEXT: sub r11, r12, r11
-; CHECK-PWR9-BE-NEXT: srawi r12, r11, 31
-; CHECK-PWR9-BE-NEXT: xor r11, r11, r12
-; CHECK-PWR9-BE-NEXT: sub r11, r11, r12
-; CHECK-PWR9-BE-NEXT: li r12, 9
-; CHECK-PWR9-BE-NEXT: vextublx r0, r12, v2
-; CHECK-PWR9-BE-NEXT: vextublx r12, r12, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR9-BE-NEXT: sub r12, r0, r12
-; CHECK-PWR9-BE-NEXT: srawi r0, r12, 31
-; CHECK-PWR9-BE-NEXT: xor r12, r12, r0
-; CHECK-PWR9-BE-NEXT: sub r12, r12, r0
-; CHECK-PWR9-BE-NEXT: li r0, 10
-; CHECK-PWR9-BE-NEXT: vextublx r30, r0, v2
-; CHECK-PWR9-BE-NEXT: vextublx r0, r0, v3
-; CHECK-PWR9-BE-NEXT: mtvsrwz v4, r12
-; CHECK-PWR9-BE-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR9-BE-NEXT: sub r0, r30, r0
-; CHECK-PWR9-BE-NEXT: srawi r30, r0, 31
-; CHECK-PWR9-BE-NEXT: xor r0, r0, r30
-; CHECK-PWR9-BE-NEXT: sub r0, r0, r30
-; CHECK-PWR9-BE-NEXT: li r30, 11
-; CHECK-PWR9-BE-NEXT: vextublx r29, r30, v2
-; CHECK-PWR9-BE-NEXT: vextublx r30, r30, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR9-BE-NEXT: sub r30, r29, r30
-; CHECK-PWR9-BE-NEXT: srawi r29, r30, 31
-; CHECK-PWR9-BE-NEXT: xor r30, r30, r29
-; CHECK-PWR9-BE-NEXT: sub r30, r30, r29
-; CHECK-PWR9-BE-NEXT: li r29, 12
-; CHECK-PWR9-BE-NEXT: vextublx r28, r29, v2
-; CHECK-PWR9-BE-NEXT: vextublx r29, r29, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR9-BE-NEXT: sub r29, r28, r29
-; CHECK-PWR9-BE-NEXT: srawi r28, r29, 31
-; CHECK-PWR9-BE-NEXT: xor r29, r29, r28
-; CHECK-PWR9-BE-NEXT: sub r29, r29, r28
-; CHECK-PWR9-BE-NEXT: li r28, 13
-; CHECK-PWR9-BE-NEXT: vextublx r27, r28, v2
-; CHECK-PWR9-BE-NEXT: vextublx r28, r28, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR9-BE-NEXT: sub r28, r27, r28
-; CHECK-PWR9-BE-NEXT: srawi r27, r28, 31
-; CHECK-PWR9-BE-NEXT: xor r28, r28, r27
-; CHECK-PWR9-BE-NEXT: sub r28, r28, r27
-; CHECK-PWR9-BE-NEXT: li r27, 14
-; CHECK-PWR9-BE-NEXT: vextublx r26, r27, v2
-; CHECK-PWR9-BE-NEXT: vextublx r27, r27, v3
-; CHECK-PWR9-BE-NEXT: clrlwi r26, r26, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR9-BE-NEXT: sub r27, r26, r27
-; CHECK-PWR9-BE-NEXT: srawi r26, r27, 31
-; CHECK-PWR9-BE-NEXT: xor r27, r27, r26
-; CHECK-PWR9-BE-NEXT: sub r27, r27, r26
-; CHECK-PWR9-BE-NEXT: li r26, 15
-; CHECK-PWR9-BE-NEXT: vextublx r25, r26, v2
-; CHECK-PWR9-BE-NEXT: vextublx r26, r26, v3
-; CHECK-PWR9-BE-NEXT: mtfprwz f0, r27
-; CHECK-PWR9-BE-NEXT: addis r27, r2, .LCPI9_0@toc@ha
-; CHECK-PWR9-BE-NEXT: mtvsrwz v3, r28
-; CHECK-PWR9-BE-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-PWR9-BE-NEXT: addi r27, r27, .LCPI9_0@toc@l
-; CHECK-PWR9-BE-NEXT: clrlwi r25, r25, 24
-; CHECK-PWR9-BE-NEXT: clrlwi r26, r26, 24
-; CHECK-PWR9-BE-NEXT: lxv vs1, 0(r27)
-; CHECK-PWR9-BE-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-PWR9-BE-NEXT: sub r26, r25, r26
-; CHECK-PWR9-BE-NEXT: srawi r25, r26, 31
-; CHECK-PWR9-BE-NEXT: xor r26, r26, r25
-; CHECK-PWR9-BE-NEXT: sub r26, r26, r25
-; CHECK-PWR9-BE-NEXT: ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-PWR9-BE-NEXT: mtvsrwz v2, r26
-; CHECK-PWR9-BE-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-PWR9-BE-NEXT: xxperm v2, vs0, vs1
-; CHECK-PWR9-BE-NEXT: mtfprwz f0, r29
-; CHECK-PWR9-BE-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-PWR9-BE-NEXT: xxperm v3, vs0, vs1
-; CHECK-PWR9-BE-NEXT: mtfprwz f0, r0
-; CHECK-PWR9-BE-NEXT: vmrghh v2, v3, v2
-; CHECK-PWR9-BE-NEXT: mtvsrwz v3, r30
-; CHECK-PWR9-BE-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-PWR9-BE-NEXT: xxperm v3, vs0, vs1
-; CHECK-PWR9-BE-NEXT: mtfprwz f0, r11
-; CHECK-PWR9-BE-NEXT: xxperm v4, vs0, vs1
-; CHECK-PWR9-BE-NEXT: vmrghh v3, v4, v3
-; CHECK-PWR9-BE-NEXT: mtvsrwz v4, r4
-; CHECK-PWR9-BE-NEXT: xxmrghw vs0, v3, v2
-; CHECK-PWR9-BE-NEXT: mtvsrwz v2, r10
-; CHECK-PWR9-BE-NEXT: mtvsrwz v3, r8
-; CHECK-PWR9-BE-NEXT: xxperm v2, vs2, vs1
-; CHECK-PWR9-BE-NEXT: mtfprwz f2, r7
-; CHECK-PWR9-BE-NEXT: xxperm v3, vs2, vs1
-; CHECK-PWR9-BE-NEXT: mtfprwz f2, r5
-; CHECK-PWR9-BE-NEXT: vmrghh v2, v3, v2
-; CHECK-PWR9-BE-NEXT: mtvsrwz v3, r6
-; CHECK-PWR9-BE-NEXT: xxperm v3, vs2, vs1
-; CHECK-PWR9-BE-NEXT: mtfprwz f2, r3
-; CHECK-PWR9-BE-NEXT: xxperm v4, vs2, vs1
-; CHECK-PWR9-BE-NEXT: vmrghh v3, v4, v3
-; CHECK-PWR9-BE-NEXT: xxmrghw vs1, v3, v2
-; CHECK-PWR9-BE-NEXT: xxmrghd v2, vs1, vs0
-; CHECK-PWR9-BE-NEXT: blr
-;
-; CHECK-PWR8-LABEL: sub_absv_8_ext:
-; CHECK-PWR8: # %bb.0: # %entry
-; CHECK-PWR8-NEXT: xxswapd vs0, v2
-; CHECK-PWR8-NEXT: xxswapd vs1, v3
-; CHECK-PWR8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: mffprd r11, f0
-; CHECK-PWR8-NEXT: mffprd r8, f1
-; CHECK-PWR8-NEXT: std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: clrldi r3, r11, 56
-; CHECK-PWR8-NEXT: clrldi r4, r8, 56
-; CHECK-PWR8-NEXT: rldicl r5, r11, 56, 56
-; CHECK-PWR8-NEXT: rldicl r6, r8, 56, 56
-; CHECK-PWR8-NEXT: rldicl r7, r11, 48, 56
-; CHECK-PWR8-NEXT: rldicl r9, r8, 48, 56
-; CHECK-PWR8-NEXT: rldicl r0, r11, 32, 56
-; CHECK-PWR8-NEXT: rldicl r30, r8, 32, 56
-; CHECK-PWR8-NEXT: rldicl r29, r11, 24, 56
-; CHECK-PWR8-NEXT: rldicl r28, r8, 24, 56
-; CHECK-PWR8-NEXT: rldicl r10, r11, 40, 56
-; CHECK-PWR8-NEXT: rldicl r12, r8, 40, 56
-; CHECK-PWR8-NEXT: rldicl r27, r11, 16, 56
-; CHECK-PWR8-NEXT: rldicl r11, r11, 8, 56
-; CHECK-PWR8-NEXT: std r24, -64(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT: clrlwi r3, r3, 24
-; CHECK-PWR8-NEXT: clrlwi r4, r4, 24
-; CHECK-PWR8-NEXT: clrlwi r5, r5, 24
-; CHECK-PWR8-NEXT: clrlwi r6, r6, 24
-; CHECK-PWR8-NEXT: clrlwi r7, r7, 24
-; CHECK-PWR8-NEXT: clrlwi r9, r9, 24
-; CHECK-PWR8-NEXT: sub r3, r3, r4
-; CHECK-PWR8-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR8-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR8-NEXT: sub r4, r5, r6
-; CHECK-PWR8-NEXT: sub r5, r7, r9
-; CHECK-PWR8-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR8-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR8-NEXT: sub r7, r0, r30
-; CHECK-PWR8-NEXT: sub r9, r29, r28
-; CHECK-PWR8-NEXT: clrlwi r10, r10, 24
-; CHECK-PWR8-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR8-NEXT: sub r6, r10, r12
-; CHECK-PWR8-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR8-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR8-NEXT: srawi r0, r5, 31
-; CHECK-PWR8-NEXT: srawi r29, r7, 31
-; CHECK-PWR8-NEXT: srawi r12, r4, 31
-; CHECK-PWR8-NEXT: srawi r28, r9, 31
-; CHECK-PWR8-NEXT: srawi r30, r6, 31
-; CHECK-PWR8-NEXT: srawi r10, r3, 31
-; CHECK-PWR8-NEXT: xor r5, r5, r0
-; CHECK-PWR8-NEXT: xor r26, r7, r29
-; CHECK-PWR8-NEXT: sub r7, r5, r0
-; CHECK-PWR8-NEXT: rldicl r5, r8, 16, 56
-; CHECK-PWR8-NEXT: rldicl r8, r8, 8, 56
-; CHECK-PWR8-NEXT: xor r4, r4, r12
-; CHECK-PWR8-NEXT: xor r25, r9, r28
-; CHECK-PWR8-NEXT: sub r9, r4, r12
-; CHECK-PWR8-NEXT: sub r4, r26, r29
-; CHECK-PWR8-NEXT: mtvsrd v1, r9
-; CHECK-PWR8-NEXT: clrlwi r5, r5, 24
-; CHECK-PWR8-NEXT: sub r5, r27, r5
-; CHECK-PWR8-NEXT: clrlwi r8, r8, 24
-; CHECK-PWR8-NEXT: sub r8, r11, r8
-; CHECK-PWR8-NEXT: xor r6, r6, r30
-; CHECK-PWR8-NEXT: sub r6, r6, r30
-; CHECK-PWR8-NEXT: xor r3, r3, r10
-; CHECK-PWR8-NEXT: sub r10, r3, r10
-; CHECK-PWR8-NEXT: sub r3, r25, r28
-; CHECK-PWR8-NEXT: mtvsrd v6, r6
-; CHECK-PWR8-NEXT: mtvsrd v7, r3
-; CHECK-PWR8-NEXT: srawi r12, r5, 31
-; CHECK-PWR8-NEXT: srawi r11, r8, 31
-; CHECK-PWR8-NEXT: xor r5, r5, r12
-; CHECK-PWR8-NEXT: xor r8, r8, r11
-; CHECK-PWR8-NEXT: sub r5, r5, r12
-; CHECK-PWR8-NEXT: sub r8, r8, r11
-; CHECK-PWR8-NEXT: mfvsrd r11, v2
-; CHECK-PWR8-NEXT: mfvsrd r12, v3
-; CHECK-PWR8-NEXT: mtvsrd v8, r8
-; CHECK-PWR8-NEXT: clrldi r0, r11, 56
-; CHECK-PWR8-NEXT: clrldi r30, r12, 56
-; CHECK-PWR8-NEXT: rldicl r29, r12, 56, 56
-; CHECK-PWR8-NEXT: rldicl r28, r12, 48, 56
-; CHECK-PWR8-NEXT: rldicl r27, r12, 40, 56
-; CHECK-PWR8-NEXT: rldicl r26, r12, 32, 56
-; CHECK-PWR8-NEXT: rldicl r25, r12, 24, 56
-; CHECK-PWR8-NEXT: rldicl r24, r12, 16, 56
-; CHECK-PWR8-NEXT: rldicl r12, r12, 8, 56
-; CHECK-PWR8-NEXT: clrlwi r0, r0, 24
-; CHECK-PWR8-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR8-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR8-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR8-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR8-NEXT: clrlwi r26, r26, 24
-; CHECK-PWR8-NEXT: clrlwi r25, r25, 24
-; CHECK-PWR8-NEXT: clrlwi r24, r24, 24
-; CHECK-PWR8-NEXT: clrlwi r12, r12, 24
-; CHECK-PWR8-NEXT: sub r0, r0, r30
-; CHECK-PWR8-NEXT: srawi r30, r0, 31
-; CHECK-PWR8-NEXT: xor r0, r0, r30
-; CHECK-PWR8-NEXT: sub r0, r0, r30
-; CHECK-PWR8-NEXT: rldicl r30, r11, 56, 56
-; CHECK-PWR8-NEXT: clrlwi r30, r30, 24
-; CHECK-PWR8-NEXT: mtvsrd v2, r0
-; CHECK-PWR8-NEXT: sub r30, r30, r29
-; CHECK-PWR8-NEXT: srawi r29, r30, 31
-; CHECK-PWR8-NEXT: xor r30, r30, r29
-; CHECK-PWR8-NEXT: sub r30, r30, r29
-; CHECK-PWR8-NEXT: rldicl r29, r11, 48, 56
-; CHECK-PWR8-NEXT: clrlwi r29, r29, 24
-; CHECK-PWR8-NEXT: mtvsrd v3, r30
-; CHECK-PWR8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: sub r29, r29, r28
-; CHECK-PWR8-NEXT: srawi r28, r29, 31
-; CHECK-PWR8-NEXT: xor r29, r29, r28
-; CHECK-PWR8-NEXT: sub r29, r29, r28
-; CHECK-PWR8-NEXT: rldicl r28, r11, 40, 56
-; CHECK-PWR8-NEXT: clrlwi r28, r28, 24
-; CHECK-PWR8-NEXT: sub r28, r28, r27
-; CHECK-PWR8-NEXT: srawi r27, r28, 31
-; CHECK-PWR8-NEXT: xor r28, r28, r27
-; CHECK-PWR8-NEXT: sub r28, r28, r27
-; CHECK-PWR8-NEXT: rldicl r27, r11, 32, 56
-; CHECK-PWR8-NEXT: clrlwi r27, r27, 24
-; CHECK-PWR8-NEXT: mtvsrd v4, r28
-; CHECK-PWR8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: sub r27, r27, r26
-; CHECK-PWR8-NEXT: srawi r26, r27, 31
-; CHECK-PWR8-NEXT: xor r27, r27, r26
-; CHECK-PWR8-NEXT: sub r27, r27, r26
-; CHECK-PWR8-NEXT: rldicl r26, r11, 24, 56
-; CHECK-PWR8-NEXT: clrlwi r26, r26, 24
-; CHECK-PWR8-NEXT: sub r26, r26, r25
-; CHECK-PWR8-NEXT: srawi r25, r26, 31
-; CHECK-PWR8-NEXT: xor r26, r26, r25
-; CHECK-PWR8-NEXT: sub r26, r26, r25
-; CHECK-PWR8-NEXT: rldicl r25, r11, 16, 56
-; CHECK-PWR8-NEXT: rldicl r11, r11, 8, 56
-; CHECK-PWR8-NEXT: clrlwi r25, r25, 24
-; CHECK-PWR8-NEXT: clrlwi r11, r11, 24
-; CHECK-PWR8-NEXT: mtvsrd v5, r26
-; CHECK-PWR8-NEXT: ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: sub r25, r25, r24
-; CHECK-PWR8-NEXT: sub r11, r11, r12
-; CHECK-PWR8-NEXT: srawi r24, r25, 31
-; CHECK-PWR8-NEXT: srawi r12, r11, 31
-; CHECK-PWR8-NEXT: xor r25, r25, r24
-; CHECK-PWR8-NEXT: xor r11, r11, r12
-; CHECK-PWR8-NEXT: sub r25, r25, r24
-; CHECK-PWR8-NEXT: sub r11, r11, r12
-; CHECK-PWR8-NEXT: ld r24, -64(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: mtvsrd v0, r11
-; CHECK-PWR8-NEXT: vmrghb v2, v3, v2
-; CHECK-PWR8-NEXT: mtvsrd v3, r29
-; CHECK-PWR8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR8-NEXT: mtvsrd v4, r27
-; CHECK-PWR8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: vmrglh v2, v3, v2
-; CHECK-PWR8-NEXT: vmrghb v4, v5, v4
-; CHECK-PWR8-NEXT: mtvsrd v5, r25
-; CHECK-PWR8-NEXT: ld r25, -56(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT: vmrghb v5, v0, v5
-; CHECK-PWR8-NEXT: mtvsrd v0, r10
-; CHECK-PWR8-NEXT: vmrglh v3, v5, v4
-; CHECK-PWR8-NEXT: xxmrglw vs0, v3, v2
-; CHECK-PWR8-NEXT: vmrghb v0, v1, v0
-; CHECK-PWR8-NEXT: mtvsrd v1, r7
-; CHECK-PWR8-NEXT: vmrghb v1, v6, v1
-; CHECK-PWR8-NEXT: mtvsrd v6, r4
-; CHECK-PWR8-NEXT: vmrglh v4, v1, v0
-; CHECK-PWR8-NEXT: vmrghb v6, v7, v6
-; CHECK-PWR8-NEXT: mtvsrd v7, r5
-; CHECK-PWR8-NEXT: vmrghb v7, v8, v7
-; CHECK-PWR8-NEXT: vmrglh v5, v7, v6
-; CHECK-PWR8-NEXT: xxmrglw vs1, v5, v4
-; CHECK-PWR8-NEXT: xxmrgld v2, vs0, vs1
-; CHECK-PWR8-NEXT: blr
+; CHECK-PWR9-LABEL: sub_absv_8_ext:
+; CHECK-PWR9: # %bb.0: # %entry
+; CHECK-PWR9-NEXT: vabsdub v2, v2, v3
+; CHECK-PWR9-NEXT: blr
;
-; CHECK-PWR7-LABEL: sub_absv_8_ext:
-; CHECK-PWR7: # %bb.0: # %entry
-; CHECK-PWR7-NEXT: stdu r1, -512(r1)
-; CHECK-PWR7-NEXT: .cfi_def_cfa_offset 512
-; CHECK-PWR7-NEXT: .cfi_offset r14, -144
-; CHECK-PWR7-NEXT: .cfi_offset r15, -136
-; CHECK-PWR7-NEXT: .cfi_offset r16, -128
-; CHECK-PWR7-NEXT: .cfi_offset r17, -120
-; CHECK-PWR7-NEXT: .cfi_offset r18, -112
-; CHECK-PWR7-NEXT: .cfi_offset r19, -104
-; CHECK-PWR7-NEXT: .cfi_offset r20, -96
-; CHECK-PWR7-NEXT: .cfi_offset r21, -88
-; CHECK-PWR7-NEXT: .cfi_offset r22, -80
-; CHECK-PWR7-NEXT: .cfi_offset r23, -72
-; CHECK-PWR7-NEXT: .cfi_offset r24, -64
-; CHECK-PWR7-NEXT: .cfi_offset r25, -56
-; CHECK-PWR7-NEXT: .cfi_offset r26, -48
-; CHECK-PWR7-NEXT: .cfi_offset r27, -40
-; CHECK-PWR7-NEXT: .cfi_offset r28, -32
-; CHECK-PWR7-NEXT: .cfi_offset r29, -24
-; CHECK-PWR7-NEXT: .cfi_offset r30, -16
-; CHECK-PWR7-NEXT: .cfi_offset r31, -8
-; CHECK-PWR7-NEXT: .cfi_offset r2, -152
-; CHECK-PWR7-NEXT: addi r3, r1, 320
-; CHECK-PWR7-NEXT: std r14, 368(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r15, 376(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r16, 384(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r17, 392(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r18, 400(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r19, 408(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r20, 416(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r21, 424(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r22, 432(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r23, 440(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r24, 448(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r25, 456(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r26, 464(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r27, 472(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r28, 480(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r29, 488(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r30, 496(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r31, 504(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: std r2, 360(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT: stxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT: lbz r3, 320(r1)
-; CHECK-PWR7-NEXT: addi r4, r1, 336
-; CHECK-PWR7-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
-; CHECK-PWR7-NEXT: stxvw4x v3, 0, r4
-; CHECK-PWR7-NEXT: lbz r15, 334(r1)
-; CHECK-PWR7-NEXT: lbz r14, 350(r1)
-; CHECK-PWR7-NEXT: lbz r31, 335(r1)
-; CHECK-PWR7-NEXT: lbz r2, 351(r1)
-; CHECK-PWR7-NEXT: sub r15, r15, r14
-; CHECK-PWR7-NEXT: sub r14, r31, r2
-; CHECK-PWR7-NEXT: srawi r2, r14, 31
-; CHECK-PWR7-NEXT: xor r14, r14, r2
-; CHECK-PWR7-NEXT: lbz r3, 333(r1)
-; CHECK-PWR7-NEXT: lbz r19, 331(r1)
-; CHECK-PWR7-NEXT: lbz r18, 347(r1)
-; CHECK-PWR7-NEXT: sub r19, r19, r18
-; CHECK-PWR7-NEXT: lbz r17, 332(r1)
-; CHECK-PWR7-NEXT: lbz r16, 348(r1)
-; CHECK-PWR7-NEXT: sub r17, r17, r16
-; CHECK-PWR7-NEXT: lbz r23, 329(r1)
-; CHECK-PWR7-NEXT: sub r14, r14, r2
-; CHECK-PWR7-NEXT: lbz r2, 349(r1)
-; CHECK-PWR7-NEXT: lbz r22, 345(r1)
-; CHECK-PWR7-NEXT: lbz r4, 336(r1)
-; CHECK-PWR7-NEXT: lbz r5, 321(r1)
-; CHECK-PWR7-NEXT: lbz r6, 337(r1)
-; CHECK-PWR7-NEXT: lbz r7, 322(r1)
-; CHECK-PWR7-NEXT: lbz r8, 338(r1)
-; CHECK-PWR7-NEXT: lbz r9, 323(r1)
-; CHECK-PWR7-NEXT: lbz r10, 339(r1)
-; CHECK-PWR7-NEXT: lbz r11, 324(r1)
-; CHECK-PWR7-NEXT: lbz r12, 340(r1)
-; CHECK-PWR7-NEXT: lbz r0, 325(r1)
-; CHECK-PWR7-NEXT: lbz r30, 341(r1)
-; CHECK-PWR7-NEXT: lbz r29, 326(r1)
-; CHECK-PWR7-NEXT: lbz r28, 342(r1)
-; CHECK-PWR7-NEXT: lbz r27, 327(r1)
-; CHECK-PWR7-NEXT: lbz r26, 343(r1)
-; CHECK-PWR7-NEXT: sub r3, r3, r2
-; CHECK-PWR7-NEXT: lbz r25, 328(r1)
-; CHECK-PWR7-NEXT: lbz r24, 344(r1)
-; CHECK-PWR7-NEXT: lbz r21, 330(r1)
-; CHECK-PWR7-NEXT: lbz r20, 346(r1)
-; CHECK-PWR7-NEXT: sub r5, r5, r6
-; CHECK-PWR7-NEXT: srawi r18, r3, 31
-; CHECK-PWR7-NEXT: sub r7, r7, r8
-; CHECK-PWR7-NEXT: sub r9, r9, r10
-; CHECK-PWR7-NEXT: sub r11, r11, r12
-; CHECK-PWR7-NEXT: sub r0, r0, r30
-; CHECK-PWR7-NEXT: sub r29, r29, r28
-; CHECK-PWR7-NEXT: sub r27, r27, r26
-; CHECK-PWR7-NEXT: sub r25, r25, r24
-; CHECK-PWR7-NEXT: srawi r31, r15, 31
-; CHECK-PWR7-NEXT: ld r2, 360(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: xor r3, r3, r18
-; CHECK-PWR7-NEXT: srawi r6, r5, 31
-; CHECK-PWR7-NEXT: srawi r8, r7, 31
-; CHECK-PWR7-NEXT: srawi r10, r9, 31
-; CHECK-PWR7-NEXT: srawi r12, r11, 31
-; CHECK-PWR7-NEXT: srawi r30, r0, 31
-; CHECK-PWR7-NEXT: sub r3, r3, r18
-; CHECK-PWR7-NEXT: srawi r18, r19, 31
-; CHECK-PWR7-NEXT: srawi r28, r29, 31
-; CHECK-PWR7-NEXT: ld r16, 384(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: srawi r26, r27, 31
-; CHECK-PWR7-NEXT: srawi r24, r25, 31
-; CHECK-PWR7-NEXT: xor r19, r19, r18
-; CHECK-PWR7-NEXT: xor r15, r15, r31
-; CHECK-PWR7-NEXT: xor r5, r5, r6
-; CHECK-PWR7-NEXT: std r3, 272(r1)
-; CHECK-PWR7-NEXT: std r3, 280(r1)
-; CHECK-PWR7-NEXT: srawi r3, r17, 31
-; CHECK-PWR7-NEXT: sub r19, r19, r18
-; CHECK-PWR7-NEXT: xor r7, r7, r8
-; CHECK-PWR7-NEXT: sub r15, r15, r31
-; CHECK-PWR7-NEXT: xor r17, r17, r3
-; CHECK-PWR7-NEXT: xor r9, r9, r10
-; CHECK-PWR7-NEXT: xor r11, r11, r12
-; CHECK-PWR7-NEXT: xor r0, r0, r30
-; CHECK-PWR7-NEXT: xor r29, r29, r28
-; CHECK-PWR7-NEXT: xor r27, r27, r26
-; CHECK-PWR7-NEXT: sub r3, r17, r3
-; CHECK-PWR7-NEXT: xor r25, r25, r24
-; CHECK-PWR7-NEXT: sub r25, r25, r24
-; CHECK-PWR7-NEXT: sub r27, r27, r26
-; CHECK-PWR7-NEXT: sub r29, r29, r28
-; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: sub r0, r0, r30
-; CHECK-PWR7-NEXT: sub r11, r11, r12
-; CHECK-PWR7-NEXT: sub r9, r9, r10
-; CHECK-PWR7-NEXT: sub r7, r7, r8
-; CHECK-PWR7-NEXT: sub r5, r5, r6
-; CHECK-PWR7-NEXT: sldi r14, r14, 56
-; CHECK-PWR7-NEXT: sldi r15, r15, 56
-; CHECK-PWR7-NEXT: ld r31, 504(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r3, 256(r1)
-; CHECK-PWR7-NEXT: std r3, 264(r1)
-; CHECK-PWR7-NEXT: sldi r3, r19, 56
-; CHECK-PWR7-NEXT: sldi r25, r25, 56
-; CHECK-PWR7-NEXT: sldi r27, r27, 56
-; CHECK-PWR7-NEXT: std r3, 240(r1)
-; CHECK-PWR7-NEXT: std r3, 248(r1)
-; CHECK-PWR7-NEXT: sub r3, r23, r22
-; CHECK-PWR7-NEXT: srawi r23, r3, 31
-; CHECK-PWR7-NEXT: sub r22, r21, r20
-; CHECK-PWR7-NEXT: srawi r21, r22, 31
-; CHECK-PWR7-NEXT: sldi r29, r29, 56
-; CHECK-PWR7-NEXT: sldi r0, r0, 56
-; CHECK-PWR7-NEXT: sldi r11, r11, 56
-; CHECK-PWR7-NEXT: xor r3, r3, r23
-; CHECK-PWR7-NEXT: xor r22, r22, r21
-; CHECK-PWR7-NEXT: sldi r9, r9, 56
-; CHECK-PWR7-NEXT: sldi r7, r7, 56
-; CHECK-PWR7-NEXT: sldi r5, r5, 56
-; CHECK-PWR7-NEXT: ld r30, 496(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: ld r28, 480(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sub r3, r3, r23
-; CHECK-PWR7-NEXT: sub r22, r22, r21
-; CHECK-PWR7-NEXT: std r14, 304(r1)
-; CHECK-PWR7-NEXT: ld r26, 464(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: sldi r22, r22, 56
-; CHECK-PWR7-NEXT: ld r24, 448(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: ld r23, 440(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r14, 312(r1)
-; CHECK-PWR7-NEXT: std r15, 288(r1)
-; CHECK-PWR7-NEXT: std r3, 208(r1)
-; CHECK-PWR7-NEXT: std r3, 216(r1)
-; CHECK-PWR7-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
-; CHECK-PWR7-NEXT: std r15, 296(r1)
-; CHECK-PWR7-NEXT: ld r21, 424(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: ld r20, 416(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r22, 224(r1)
-; CHECK-PWR7-NEXT: std r22, 232(r1)
-; CHECK-PWR7-NEXT: sub r4, r3, r4
-; CHECK-PWR7-NEXT: std r25, 192(r1)
-; CHECK-PWR7-NEXT: ld r22, 432(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: ld r19, 408(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: srawi r3, r4, 31
-; CHECK-PWR7-NEXT: std r25, 200(r1)
-; CHECK-PWR7-NEXT: ld r25, 456(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r27, 176(r1)
-; CHECK-PWR7-NEXT: std r27, 184(r1)
-; CHECK-PWR7-NEXT: xor r4, r4, r3
-; CHECK-PWR7-NEXT: std r29, 160(r1)
-; CHECK-PWR7-NEXT: ld r27, 472(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r29, 168(r1)
-; CHECK-PWR7-NEXT: std r0, 144(r1)
-; CHECK-PWR7-NEXT: sub r3, r4, r3
-; CHECK-PWR7-NEXT: std r0, 152(r1)
-; CHECK-PWR7-NEXT: ld r29, 488(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: ld r18, 400(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: sldi r3, r3, 56
-; CHECK-PWR7-NEXT: std r11, 128(r1)
-; CHECK-PWR7-NEXT: ld r17, 392(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r11, 136(r1)
-; CHECK-PWR7-NEXT: std r9, 112(r1)
-; CHECK-PWR7-NEXT: std r3, 64(r1)
-; CHECK-PWR7-NEXT: std r3, 72(r1)
-; CHECK-PWR7-NEXT: addi r3, r1, 304
-; CHECK-PWR7-NEXT: std r9, 120(r1)
-; CHECK-PWR7-NEXT: ld r15, 376(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: std r7, 96(r1)
-; CHECK-PWR7-NEXT: std r7, 104(r1)
-; CHECK-PWR7-NEXT: std r5, 80(r1)
-; CHECK-PWR7-NEXT: std r5, 88(r1)
-; CHECK-PWR7-NEXT: lxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 288
-; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 272
-; CHECK-PWR7-NEXT: ld r14, 368(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT: vmrghb v2, v3, v2
-; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 256
-; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 240
-; CHECK-PWR7-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR7-NEXT: vmrghh v2, v3, v2
-; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 224
-; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 208
-; CHECK-PWR7-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 192
-; CHECK-PWR7-NEXT: lxvw4x v5, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 176
-; CHECK-PWR7-NEXT: vmrghb v4, v5, v4
-; CHECK-PWR7-NEXT: vmrghh v3, v4, v3
-; CHECK-PWR7-NEXT: xxmrghw vs0, v3, v2
-; CHECK-PWR7-NEXT: lxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 160
-; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 144
-; CHECK-PWR7-NEXT: vmrghb v2, v3, v2
-; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 128
-; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR7-NEXT: addi r3, r1, 112
-; CHECK-PWR7-NEXT: vmrghh v2, v3, v2
-; CHECK-PWR7-NEXT: lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 96
-; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 80
-; CHECK-PWR7-NEXT: vmrghb v3, v4, v3
-; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3
-; CHECK-PWR7-NEXT: addi r3, r1, 64
-; CHECK-PWR7-NEXT: lxvw4x v5, 0, r3
-; CHECK-PWR7-NEXT: vmrghb v4, v5, v4
-; CHECK-PWR7-NEXT: vmrghh v3, v4, v3
-; CHECK-PWR7-NEXT: xxmrghw vs1, v3, v2
-; CHECK-PWR7-NEXT: xxmrghd v2, vs1, vs0
-; CHECK-PWR7-NEXT: addi r1, r1, 512
-; CHECK-PWR7-NEXT: blr
+; CHECK-PWR78-LABEL: sub_absv_8_ext:
+; CHECK-PWR78: # %bb.0: # %entry
+; CHECK-PWR78-NEXT: vminub v4, v2, v3
+; CHECK-PWR78-NEXT: vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT: vsububm v2, v2, v4
+; CHECK-PWR78-NEXT: blr
entry:
- %vecext = extractelement <16 x i8> %a, i32 0
- %conv = zext i8 %vecext to i32
- %vecext1 = extractelement <16 x i8> %b, i32 0
- %conv2 = zext i8 %vecext1 to i32
- %sub = sub nsw i32 %conv, %conv2
- %ispos = icmp sgt i32 %sub, -1
- %neg = sub nsw i32 0, %sub
- %0 = select i1 %ispos, i32 %sub, i32 %neg
- %conv3 = trunc i32 %0 to i8
- %vecins = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, i8 %conv3, i32 0
- %vecext4 = extractelement <16 x i8> %a, i32 1
- %conv5 = zext i8 %vecext4 to i32
- %vecext6 = extractelement <16 x i8> %b, i32 1
- %conv7 = zext i8 %vecext6 to i32
- %sub8 = sub nsw i32 %conv5, %conv7
- %ispos171 = icmp sgt i32 %sub8, -1
- %neg172 = sub nsw i32 0, %sub8
- %1 = select i1 %ispos171, i32 %sub8, i32 %neg172
- %conv10 = trunc i32 %1 to i8
- %vecins11 = insertelement <16 x i8> %vecins, i8 %conv10, i32 1
- %vecext12 = extractelement <16 x i8> %a, i32 2
- %conv13 = zext i8 %vecext12 to i32
- %vecext14 = extractelement <16 x i8> %b, i32 2
- %conv15 = zext i8 %vecext14 to i32
- %sub16 = sub nsw i32 %conv13, %conv15
- %ispos173 = icmp sgt i32 %sub16, -1
- %neg174 = sub nsw i32 0, %sub16
- %2 = select i1 %ispos173, i32 %sub16, i32 %neg174
- %conv18 = trunc i32 %2 to i8
- %vecins19 = insertelement <16 x i8> %vecins11, i8 %conv18, i32 2
- %vecext20 = extractelement <16 x i8> %a, i32 3
- %conv21 = zext i8 %vecext20 to i32
- %vecext22 = extractelement <16 x i8> %b, i32 3
- %conv23 = zext i8 %vecext22 to i32
- %sub24 = sub nsw i32 %conv21, %conv23
- %ispos175 = icmp sgt i32 %sub24, -1
- %neg176 = sub nsw i32 0, %sub24
- %3 = select i1 %ispos175, i32 %sub24, i32 %neg176
- %conv26 = trunc i32 %3 to i8
- %vecins27 = insertelement <16 x i8> %vecins19, i8 %conv26, i32 3
- %vecext28 = extractelement <16 x i8> %a, i32 4
- %conv29 = zext i8 %vecext28 to i32
- %vecext30 = extractelement <16 x i8> %b, i32 4
- %conv31 = zext i8 %vecext30 to i32
- %sub32 = sub nsw i32 %conv29, %conv31
- %ispos177 = icmp sgt i32 %sub32, -1
- %neg178 = sub nsw i32 0, %sub32
- %4 = select i1 %ispos177, i32 %sub32, i32 %neg178
- %conv34 = trunc i32 %4 to i8
- %vecins35 = insertelement <16 x i8> %vecins27, i8 %conv34, i32 4
- %vecext36 = extractelement <16 x i8> %a, i32 5
- %conv37 = zext i8 %vecext36 to i32
- %vecext38 = extractelement <16 x i8> %b, i32 5
- %conv39 = zext i8 %vecext38 to i32
- %sub40 = sub nsw i32 %conv37, %conv39
- %ispos179 = icmp sgt i32 %sub40, -1
- %neg180 = sub nsw i32 0, %sub40
- %5 = select i1 %ispos179, i32 %sub40, i32 %neg180
- %conv42 = trunc i32 %5 to i8
- %vecins43 = insertelement <16 x i8> %vecins35, i8 %conv42, i32 5
- %vecext44 = extractelement <16 x i8> %a, i32 6
- %conv45 = zext i8 %vecext44 to i32
- %vecext46 = extractelement <16 x i8> %b, i32 6
- %conv47 = zext i8 %vecext46 to i32
- %sub48 = sub nsw i32 %conv45, %conv47
- %ispos181 = icmp sgt i32 %sub48, -1
- %neg182 = sub nsw i32 0, %sub48
- %6 = select i1 %ispos181, i32 %sub48, i32 %neg182
- %conv50 = trunc i32 %6 to i8
- %vecins51 = insertelement <16 x i8> %vecins43, i8 %conv50, i32 6
- %vecext52 = extractelement <16 x i8> %a, i32 7
- %conv53 = zext i8 %vecext52 to i32
- %vecext54 = extractelement <16 x i8> %b, i32 7
- %conv55 = zext i8 %vecext54 to i32
- %sub56 = sub nsw i32 %conv53, %conv55
- %ispos183 = icmp sgt i32 %sub56, -1
- %neg184 = sub nsw i32 0, %sub56
- %7 = select i1 %ispos183, i32 %sub56, i32 %neg184
- %conv58 = trunc i32 %7 to i8
- %vecins59 = insertelement <16 x i8> %vecins51, i8 %conv58, i32 7
- %vecext60 = extractelement <16 x i8> %a, i32 8
- %conv61 = zext i8 %vecext60 to i32
- %vecext62 = extractelement <16 x i8> %b, i32 8
- %conv63 = zext i8 %vecext62 to i32
- %sub64 = sub nsw i32 %conv61, %conv63
- %ispos185 = icmp sgt i32 %sub64, -1
- %neg186 = sub nsw i32 0, %sub64
- %8 = select i1 %ispos185, i32 %sub64, i32 %neg186
- %conv66 = trunc i32 %8 to i8
- %vecins67 = insertelement <16 x i8> %vecins59, i8 %conv66, i32 8
- %vecext68 = extractelement <16 x i8> %a, i32 9
- %conv69 = zext i8 %vecext68 to i32
- %vecext70 = extractelement <16 x i8> %b, i32 9
- %conv71 = zext i8 %vecext70 to i32
- %sub72 = sub nsw i32 %conv69, %conv71
- %ispos187 = icmp sgt i32 %sub72, -1
- %neg188 = sub nsw i32 0, %sub72
- %9 = select i1 %ispos187, i32 %sub72, i32 %neg188
- %conv74 = trunc i32 %9 to i8
- %vecins75 = insertelement <16 x i8> %vecins67, i8 %conv74, i32 9
- %vecext76 = extractelement <16 x i8> %a, i32 10
- %conv77 = zext i8 %vecext76 to i32
- %vecext78 = extractelement <16 x i8> %b, i32 10
- %conv79 = zext i8 %vecext78 to i32
- %sub80 = sub nsw i32 %conv77, %conv79
- %ispos189 = icmp sgt i32 %sub80, -1
- %neg190 = sub nsw i32 0, %sub80
- %10 = select i1 %ispos189, i32 %sub80, i32 %neg190
- %conv82 = trunc i32 %10 to i8
- %vecins83 = insertelement <16 x i8> %vecins75, i8 %conv82, i32 10
- %vecext84 = extractelement <16 x i8> %a, i32 11
- %conv85 = zext i8 %vecext84 to i32
- %vecext86 = extractelement <16 x i8> %b, i32 11
- %conv87 = zext i8 %vecext86 to i32
- %sub88 = sub nsw i32 %conv85, %conv87
- %ispos191 = icmp sgt i32 %sub88, -1
- %neg192 = sub nsw i32 0, %sub88
- %11 = select i1 %ispos191, i32 %sub88, i32 %neg192
- %conv90 = trunc i32 %11 to i8
- %vecins91 = insertelement <16 x i8> %vecins83, i8 %conv90, i32 11
- %vecext92 = extractelement <16 x i8> %a, i32 12
- %conv93 = zext i8 %vecext92 to i32
- %vecext94 = extractelement <16 x i8> %b, i32 12
- %conv95 = zext i8 %vecext94 to i32
- %sub96 = sub nsw i32 %conv93, %conv95
- %ispos193 = icmp sgt i32 %sub96, -1
- %neg194 = sub nsw i32 0, %sub96
- %12 = select i1 %ispos193, i32 %sub96, i32 %neg194
- %conv98 = trunc i32 %12 to i8
- %vecins99 = insertelement <16 x i8> %vecins91, i8 %conv98, i32 12
- %vecext100 = extractelement <16 x i8> %a, i32 13
- %conv101 = zext i8 %vecext100 to i32
- %vecext102 = extractelement <16 x i8> %b, i32 13
- %conv103 = zext i8 %vecext102 to i32
- %sub104 = sub nsw i32 %conv101, %conv103
- %ispos195 = icmp sgt i32 %sub104, -1
- %neg196 = sub nsw i32 0, %sub104
- %13 = select i1 %ispos195, i32 %sub104, i32 %neg196
- %conv106 = trunc i32 %13 to i8
- %vecins107 = insertelement <16 x i8> %vecins99, i8 %conv106, i32 13
- %vecext108 = extractelement <16 x i8> %a, i32 14
- %conv109 = zext i8 %vecext108 to i32
- %vecext110 = extractelement <16 x i8> %b, i32 14
- %conv111 = zext i8 %vecext110 to i32
- %sub112 = sub nsw i32 %conv109, %conv111
- %ispos197 = icmp sgt i32 %sub112, -1
- %neg198 = sub nsw i32 0, %sub112
- %14 = select i1 %ispos197, i32 %sub112, i32 %neg198
- %conv114 = trunc i32 %14 to i8
- %vecins115 = insertelement <16 x i8> %vecins107, i8 %conv114, i32 14
- %vecext116 = extractelement <16 x i8> %a, i32 15
- %conv117 = zext i8 %vecext116 to i32
- %vecext118 = extractelement <16 x i8> %b, i32 15
- %conv119 = zext i8 %vecext118 to i32
- %sub120 = sub nsw i32 %conv117, %conv119
- %ispos199 = icmp sgt i32 %sub120, -1
- %neg200 = sub nsw i32 0, %sub120
- %15 = select i1 %ispos199, i32 %sub120, i32 %neg200
- %conv122 = trunc i32 %15 to i8
- %vecins123 = insertelement <16 x i8> %vecins115, i8 %conv122, i32 15
- ret <16 x i8> %vecins123
+ %0 = zext <16 x i8> %a to <16 x i32>
+ %1 = zext <16 x i8> %b to <16 x i32>
+ %2 = sub nsw <16 x i32> %0, %1
+ %3 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %2, i1 true)
+ %4 = trunc <16 x i32> %3 to <16 x i8>
+ ret <16 x i8> %4
}

define <4 x i32> @sub_absv_vec_32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr {
diff --git a/llvm/test/CodeGen/PowerPC/ppc_reduce_cr_logicals.ll b/llvm/test/CodeGen/PowerPC/ppc_reduce_cr_logicals.ll
new file mode 100644
index 0000000..585ce89
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/ppc_reduce_cr_logicals.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpc64-unknown-gnu-linux < %s | FileCheck %s -check-prefix=CHECK
+; RUN: llc -mtriple=powerpc-unknown-gnu-linux < %s | FileCheck %s -check-prefix=CHECKBE
+
+define i32 @xe_migrate_copy(ptr %m, ptr %dst, ptr %tile, ptr %0, ptr %primary_gt, i1 %tobool4, i1 %tobool9, i64 %1, i32 %conv55, i1 %tobool37.not) nounwind {
+; CHECK-LABEL: xe_migrate_copy:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: mflr 0
+; CHECK-NEXT: stdu 1, -128(1)
+; CHECK-NEXT: lbz 4, 255(1)
+; CHECK-NEXT: andi. 4, 4, 1
+; CHECK-NEXT: std 0, 144(1)
+; CHECK-NEXT: crmove 20, 1
+; CHECK-NEXT: andi. 4, 9, 1
+; CHECK-NEXT: lwz 9, 244(1)
+; CHECK-NEXT: crmove 21, 1
+; CHECK-NEXT: andi. 4, 8, 1
+; CHECK-NEXT: li 4, 0
+; CHECK-NEXT: std 4, 112(1)
+; CHECK-NEXT: crandc 21, 21, 20
+; CHECK-NEXT: bc 12, 21, .LBB0_2
+; CHECK-NEXT: # %bb.1: # %while.body
+; CHECK-NEXT: crand 20, 20, 1
+; CHECK-NEXT: li 8, 0
+; CHECK-NEXT: bc 4, 20, .LBB0_3
+; CHECK-NEXT: .LBB0_2: # %while.body
+; CHECK-NEXT: li 8, 1
+; CHECK-NEXT: .LBB0_3: # %while.body
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: li 6, 0
+; CHECK-NEXT: mr 4, 3
+; CHECK-NEXT: li 7, 0
+; CHECK-NEXT: li 10, 0
+; CHECK-NEXT: bl xe_migrate_ccs_copy
+; CHECK-NEXT: nop
+; CHECK-NEXT: addi 1, 1, 128
+; CHECK-NEXT: ld 0, 16(1)
+; CHECK-NEXT: mtlr 0
+; CHECK-NEXT: blr
+;
+; CHECKBE-LABEL: xe_migrate_copy:
+; CHECKBE: # %bb.0: # %entry
+; CHECKBE-NEXT: mflr 0
+; CHECKBE-NEXT: stwu 1, -32(1)
+; CHECKBE-NEXT: lbz 4, 55(1)
+; CHECKBE-NEXT: li 5, 0
+; CHECKBE-NEXT: stw 0, 36(1)
+; CHECKBE-NEXT: andi. 4, 4, 1
+; CHECKBE-NEXT: crmove 20, 1
+; CHECKBE-NEXT: andi. 4, 9, 1
+; CHECKBE-NEXT: crmove 21, 1
+; CHECKBE-NEXT: andi. 4, 8, 1
+; CHECKBE-NEXT: lwz 4, 48(1)
+; CHECKBE-NEXT: crandc 21, 21, 20
+; CHECKBE-NEXT: stw 5, 24(1)
+; CHECKBE-NEXT: stw 5, 20(1)
+; CHECKBE-NEXT: stw 5, 16(1)
+; CHECKBE-NEXT: stw 4, 12(1)
+; CHECKBE-NEXT: bc 12, 21, .LBB0_2
+; CHECKBE-NEXT: # %bb.1: # %while.body
+; CHECKBE-NEXT: crand 20, 20, 1
+; CHECKBE-NEXT: li 8, 0
+; CHECKBE-NEXT: bc 4, 20, .LBB0_3
+; CHECKBE-NEXT: .LBB0_2: # %while.body
+; CHECKBE-NEXT: li 8, 1
+; CHECKBE-NEXT: .LBB0_3: # %while.body
+; CHECKBE-NEXT: mr 4, 3
+; CHECKBE-NEXT: li 6, 0
+; CHECKBE-NEXT: li 7, 0
+; CHECKBE-NEXT: li 9, 0
+; CHECKBE-NEXT: li 10, 0
+; CHECKBE-NEXT: stw 8, 8(1)
+; CHECKBE-NEXT: bl xe_migrate_ccs_copy
+; CHECKBE-NEXT: lwz 0, 36(1)
+; CHECKBE-NEXT: addi 1, 1, 32
+; CHECKBE-NEXT: mtlr 0
+; CHECKBE-NEXT: blr
+
+entry:
+ br label %while.body
+
+while.body:
+ %cond53.in = select i1 %tobool37.not, i1 %tobool4, i1 %tobool9
+ %call57 = call zeroext i32 @xe_migrate_ccs_copy(ptr noundef %m, ptr noundef %m, i64 0, i1 false, i64 0, i1 %cond53.in, i32 %conv55, i64 0, i1 false)
+ ret i32 %call57
+}
+
+declare i32 @xe_migrate_ccs_copy(ptr, ptr, i64, i1, i64, i1, i32, i64, i1)
diff --git a/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir b/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
index f3ef95b..05b3056 100644
--- a/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
+++ b/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
@@ -99,8 +99,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
constants: []
diff --git a/llvm/test/CodeGen/PowerPC/remove-implicit-use.mir b/llvm/test/CodeGen/PowerPC/remove-implicit-use.mir
index f5b931e..f89475e 100644
--- a/llvm/test/CodeGen/PowerPC/remove-implicit-use.mir
+++ b/llvm/test/CodeGen/PowerPC/remove-implicit-use.mir
@@ -56,8 +56,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
constants: []
diff --git a/llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir b/llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir
index 913877b..2040494 100644
--- a/llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir
+++ b/llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir
@@ -55,8 +55,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
- { id: 0, type: spill-slot, offset: -80, size: 8, alignment: 16, stack-id: default,
callee-saved-register: '$x30', callee-saved-restored: true, debug-info-variable: '',
diff --git a/llvm/test/CodeGen/PowerPC/remove-self-copies.mir b/llvm/test/CodeGen/PowerPC/remove-self-copies.mir
index b5713a9..6c08390 100644
--- a/llvm/test/CodeGen/PowerPC/remove-self-copies.mir
+++ b/llvm/test/CodeGen/PowerPC/remove-self-copies.mir
@@ -65,8 +65,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
diff --git a/llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir b/llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir
index a1d8539d..584f2a9 100644
--- a/llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir
+++ b/llvm/test/CodeGen/PowerPC/rlwinm_rldicl_to_andi.mir
@@ -107,8 +107,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -167,8 +167,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -227,8 +227,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -284,8 +284,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -338,8 +338,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
@@ -392,8 +392,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
diff --git a/llvm/test/CodeGen/PowerPC/schedule-addi-load.mir b/llvm/test/CodeGen/PowerPC/schedule-addi-load.mir
index 1717238..4414dfa 100644
--- a/llvm/test/CodeGen/PowerPC/schedule-addi-load.mir
+++ b/llvm/test/CodeGen/PowerPC/schedule-addi-load.mir
@@ -68,8 +68,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
constants: []
diff --git a/llvm/test/CodeGen/PowerPC/setcr_bc.mir b/llvm/test/CodeGen/PowerPC/setcr_bc.mir
index bc8bb55..76f9d5e 100644
--- a/llvm/test/CodeGen/PowerPC/setcr_bc.mir
+++ b/llvm/test/CodeGen/PowerPC/setcr_bc.mir
@@ -64,8 +64,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
- { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, stack-id: default,
callee-saved-register: '$x30', callee-saved-restored: true, debug-info-variable: '',
diff --git a/llvm/test/CodeGen/PowerPC/setcr_bc2.mir b/llvm/test/CodeGen/PowerPC/setcr_bc2.mir
index 5986c88..433ea63 100644
--- a/llvm/test/CodeGen/PowerPC/setcr_bc2.mir
+++ b/llvm/test/CodeGen/PowerPC/setcr_bc2.mir
@@ -64,8 +64,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
- { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, stack-id: default,
callee-saved-register: '$x30', callee-saved-restored: true, debug-info-variable: '',
diff --git a/llvm/test/CodeGen/PowerPC/setcr_bc3.mir b/llvm/test/CodeGen/PowerPC/setcr_bc3.mir
index 2d037d0..942ac69 100644
--- a/llvm/test/CodeGen/PowerPC/setcr_bc3.mir
+++ b/llvm/test/CodeGen/PowerPC/setcr_bc3.mir
@@ -37,8 +37,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
- { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, stack-id: default,
callee-saved-register: '$x30', callee-saved-restored: true, debug-info-variable: '',
diff --git a/llvm/test/CodeGen/PowerPC/stack-protector-target.ll b/llvm/test/CodeGen/PowerPC/stack-protector-target.ll
index 03ffa0b..9b93040 100644
--- a/llvm/test/CodeGen/PowerPC/stack-protector-target.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-protector-target.ll
@@ -65,8 +65,8 @@ define void @func() sspreq nounwind {
; LINUX32: # %bb.0:
; LINUX32-NEXT: mflr 0
; LINUX32-NEXT: stwu 1, -16(1)
-; LINUX32-NEXT: stw 0, 20(1)
; LINUX32-NEXT: lwz 3, -28680(2)
+; LINUX32-NEXT: stw 0, 20(1)
; LINUX32-NEXT: stw 3, 12(1)
; LINUX32-NEXT: addi 3, 1, 8
; LINUX32-NEXT: bl capture
@@ -86,8 +86,8 @@ define void @func() sspreq nounwind {
; LINUX64: # %bb.0:
; LINUX64-NEXT: mflr 0
; LINUX64-NEXT: stdu 1, -128(1)
-; LINUX64-NEXT: std 0, 144(1)
; LINUX64-NEXT: ld 3, -28688(13)
+; LINUX64-NEXT: std 0, 144(1)
; LINUX64-NEXT: std 3, 120(1)
; LINUX64-NEXT: addi 3, 1, 116
; LINUX64-NEXT: bl capture
diff --git a/llvm/test/CodeGen/PowerPC/swaps-le-1.ll b/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
index f3e3410..5d5445f 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
@@ -187,34 +187,34 @@ define void @foo() {
; CHECK-P9-NEXT: .p2align 4
; CHECK-P9-NEXT: .LBB0_1: # %vector.body
; CHECK-P9-NEXT: #
-; CHECK-P9-NEXT: lxv 2, -32(6)
-; CHECK-P9-NEXT: lxv 3, -32(5)
-; CHECK-P9-NEXT: lxv 4, -16(5)
-; CHECK-P9-NEXT: vadduwm 2, 3, 2
+; CHECK-P9-NEXT: lxv 2, -32(3)
; CHECK-P9-NEXT: lxv 3, -32(4)
+; CHECK-P9-NEXT: lxv 4, -16(4)
+; CHECK-P9-NEXT: vadduwm 2, 3, 2
+; CHECK-P9-NEXT: lxv 3, -32(5)
; CHECK-P9-NEXT: vmuluwm 2, 2, 3
-; CHECK-P9-NEXT: lxv 3, -16(6)
-; CHECK-P9-NEXT: vadduwm 3, 4, 3
-; CHECK-P9-NEXT: lxv 4, 0(5)
-; CHECK-P9-NEXT: stxv 2, -32(3)
-; CHECK-P9-NEXT: lxv 2, -16(4)
-; CHECK-P9-NEXT: vmuluwm 2, 3, 2
-; CHECK-P9-NEXT: lxv 3, 0(6)
+; CHECK-P9-NEXT: lxv 3, -16(3)
; CHECK-P9-NEXT: vadduwm 3, 4, 3
-; CHECK-P9-NEXT: lxv 4, 16(5)
-; CHECK-P9-NEXT: addi 5, 5, 64
-; CHECK-P9-NEXT: stxv 2, -16(3)
-; CHECK-P9-NEXT: lxv 2, 0(4)
+; CHECK-P9-NEXT: lxv 4, 0(4)
+; CHECK-P9-NEXT: stxv 2, -32(6)
+; CHECK-P9-NEXT: lxv 2, -16(5)
; CHECK-P9-NEXT: vmuluwm 2, 3, 2
-; CHECK-P9-NEXT: lxv 3, 16(6)
-; CHECK-P9-NEXT: addi 6, 6, 64
+; CHECK-P9-NEXT: lxv 3, 0(3)
; CHECK-P9-NEXT: vadduwm 3, 4, 3
-; CHECK-P9-NEXT: stxv 2, 0(3)
-; CHECK-P9-NEXT: lxv 2, 16(4)
+; CHECK-P9-NEXT: lxv 4, 16(4)
; CHECK-P9-NEXT: addi 4, 4, 64
+; CHECK-P9-NEXT: stxv 2, -16(6)
+; CHECK-P9-NEXT: lxv 2, 0(5)
; CHECK-P9-NEXT: vmuluwm 2, 3, 2
-; CHECK-P9-NEXT: stxv 2, 16(3)
+; CHECK-P9-NEXT: lxv 3, 16(3)
; CHECK-P9-NEXT: addi 3, 3, 64
+; CHECK-P9-NEXT: vadduwm 3, 4, 3
+; CHECK-P9-NEXT: stxv 2, 0(6)
+; CHECK-P9-NEXT: lxv 2, 16(5)
+; CHECK-P9-NEXT: addi 5, 5, 64
+; CHECK-P9-NEXT: vmuluwm 2, 3, 2
+; CHECK-P9-NEXT: stxv 2, 16(6)
+; CHECK-P9-NEXT: addi 6, 6, 64
; CHECK-P9-NEXT: bdnz .LBB0_1
; CHECK-P9-NEXT: # %bb.2: # %for.end
; CHECK-P9-NEXT: blr
diff --git a/llvm/test/CodeGen/PowerPC/tls-picgot.ll b/llvm/test/CodeGen/PowerPC/tls-picgot.ll
new file mode 100644
index 0000000..6562d86
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/tls-picgot.ll
@@ -0,0 +1,31 @@
+; RUN: llc -verify-machineinstrs -relocation-model=pic < %s | FileCheck %s
+
+target triple = "powerpc-unknown-linux-gnu"
+
+; Test that LR is preserved when PPC32PICGOT clobbers it with a local "bl".
+
+@TLS = external thread_local global i8
+
+; CHECK-LABEL: tls_addr:
+; CHECK: mflr [[SAVED_REG:[0-9]+]]
+
+; CHECK: bl [[JUMP:\.L[[:alnum:]_]+]]
+; CHECK-NEXT: [[OFFSET:\.L[[:alnum:]_]+]]:
+; CHECK-NEXT: .long _GLOBAL_OFFSET_TABLE_-[[OFFSET]]
+; CHECK-NEXT: [[JUMP]]
+; CHECK-NEXT: mflr {{[0-9]+}}
+
+; CHECK: mtlr [[SAVED_REG]]
+; CHECK-NEXT: blr
+
+define ptr @tls_addr() unnamed_addr {
+ %1 = call ptr @llvm.threadlocal.address.p0(ptr @TLS)
+ ret ptr %1
+}
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+!llvm.module.flags = !{!0, !1}
+
+!0 = !{i32 8, !"PIC Level", i32 2}
+!1 = !{i32 7, !"PIE Level", i32 2}
diff --git a/llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir b/llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
index 73bd475..a55cb04 100644
--- a/llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
+++ b/llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
@@ -43,8 +43,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
diff --git a/llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir b/llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
index ffeb066..49d4a15 100644
--- a/llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
+++ b/llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
@@ -43,8 +43,8 @@ frameInfo:
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack:
stack:
constants:
diff --git a/llvm/test/CodeGen/PowerPC/two-address-crash.mir b/llvm/test/CodeGen/PowerPC/two-address-crash.mir
index cd2e69d..21f08cb0 100644
--- a/llvm/test/CodeGen/PowerPC/two-address-crash.mir
+++ b/llvm/test/CodeGen/PowerPC/two-address-crash.mir
@@ -62,8 +62,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
- savePoint: ''
- restorePoint: ''
+ savePoint: []
+ restorePoint: []
fixedStack: []
stack: []
callSites: []
diff --git a/llvm/test/CodeGen/PowerPC/vector-llrint.ll b/llvm/test/CodeGen/PowerPC/vector-llrint.ll
index 9229fef..8a9e48e 100644
--- a/llvm/test/CodeGen/PowerPC/vector-llrint.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-llrint.ll
@@ -1,4 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; FIXME: crash "Input type needs to be promoted!"
+; SKIP: llc -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; SKIP: -mtriple=powerpc-unknown-unknown -verify-machineinstrs < %s | \
+; SKIP: FileCheck %s --check-prefix=PPC32
; RUN: llc -mcpu=pwr7 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -mtriple=powerpc64-unknown-unknown -verify-machineinstrs < %s | \
; RUN: FileCheck %s --check-prefix=BE
@@ -9,14 +13,12 @@
; RUN: -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s \
; RUN: --enable-unsafe-fp-math | FileCheck %s --check-prefix=FAST

-define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
+define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) nounwind {
; BE-LABEL: llrint_v1i64_v1f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -112(r1)
; BE-NEXT: std r0, 128(r1)
-; BE-NEXT: .cfi_def_cfa_offset 112
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: bl __truncsfhf2
; BE-NEXT: nop
; BE-NEXT: clrldi r3, r3, 48
@@ -34,8 +36,6 @@ define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -32(r1)
; CHECK-NEXT: std r0, 48(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: clrldi r3, r3, 48
@@ -53,8 +53,6 @@ define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
; FAST-NEXT: mflr r0
; FAST-NEXT: stdu r1, -32(r1)
; FAST-NEXT: std r0, 48(r1)
-; FAST-NEXT: .cfi_def_cfa_offset 32
-; FAST-NEXT: .cfi_offset lr, 16
; FAST-NEXT: bl __truncsfhf2
; FAST-NEXT: nop
; FAST-NEXT: clrldi r3, r3, 48
@@ -71,16 +69,12 @@ define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
}
declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>)

-define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
+define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) nounwind {
; BE-LABEL: llrint_v1i64_v2f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -160(r1)
; BE-NEXT: std r0, 176(r1)
-; BE-NEXT: .cfi_def_cfa_offset 160
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r30, -24
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f31, 152(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f31, f1
; BE-NEXT: fmr f1, f2
@@ -118,17 +112,12 @@ define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -96(r1)
-; CHECK-NEXT: std r0, 112(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 96
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r30, -24
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v31, -48
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 112(r1)
; CHECK-NEXT: std r30, 72(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f31, 88(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f2
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f31
@@ -153,7 +142,7 @@ define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
; CHECK-NEXT: lfd f31, 88(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r30, 72(r1) # 8-byte Folded Reload
; CHECK-NEXT: xxmrghd v2, vs0, v31
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 96
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -162,10 +151,6 @@ define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
; FAST-LABEL: llrint_v1i64_v2f16:
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
-; FAST-NEXT: .cfi_def_cfa_offset 48
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
; FAST-NEXT: stdu r1, -48(r1)
@@ -202,20 +187,12 @@ define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
}
declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>)

-define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
+define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) nounwind {
; BE-LABEL: llrint_v4i64_v4f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -208(r1)
; BE-NEXT: std r0, 224(r1)
-; BE-NEXT: .cfi_def_cfa_offset 208
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r28, -56
-; BE-NEXT: .cfi_offset r29, -48
-; BE-NEXT: .cfi_offset r30, -40
-; BE-NEXT: .cfi_offset f29, -24
-; BE-NEXT: .cfi_offset f30, -16
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f29, 184(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f29, f1
; BE-NEXT: fmr f1, f2
@@ -289,18 +266,8 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -144(r1)
-; CHECK-NEXT: std r0, 160(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 144
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r28, -56
-; CHECK-NEXT: .cfi_offset r29, -48
-; CHECK-NEXT: .cfi_offset r30, -40
-; CHECK-NEXT: .cfi_offset f29, -24
-; CHECK-NEXT: .cfi_offset f30, -16
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v30, -96
-; CHECK-NEXT: .cfi_offset v31, -80
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 160(r1)
; CHECK-NEXT: std r28, 88(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r29, 96(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r30, 104(r1) # 8-byte Folded Spill
@@ -308,11 +275,11 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
; CHECK-NEXT: fmr f29, f2
; CHECK-NEXT: stfd f30, 128(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f30, f3
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: stfd f31, 136(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f4
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f29
@@ -365,11 +332,11 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
; CHECK-NEXT: lfd f29, 120(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r30, 104(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 96(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
; CHECK-NEXT: ld r28, 88(r1) # 8-byte Folded Reload
; CHECK-NEXT: xxmrghd v3, vs0, v30
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 144
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -378,12 +345,6 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
; FAST-LABEL: llrint_v4i64_v4f16:
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
-; FAST-NEXT: .cfi_def_cfa_offset 64
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset f28, -32
-; FAST-NEXT: .cfi_offset f29, -24
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
; FAST-NEXT: stfd f28, -32(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f29, -24(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
@@ -447,28 +408,12 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
}
declare <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half>)
-define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
+define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) nounwind {
; BE-LABEL: llrint_v8i64_v8f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -304(r1)
; BE-NEXT: std r0, 320(r1)
-; BE-NEXT: .cfi_def_cfa_offset 304
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r24, -120
-; BE-NEXT: .cfi_offset r25, -112
-; BE-NEXT: .cfi_offset r26, -104
-; BE-NEXT: .cfi_offset r27, -96
-; BE-NEXT: .cfi_offset r28, -88
-; BE-NEXT: .cfi_offset r29, -80
-; BE-NEXT: .cfi_offset r30, -72
-; BE-NEXT: .cfi_offset f25, -56
-; BE-NEXT: .cfi_offset f26, -48
-; BE-NEXT: .cfi_offset f27, -40
-; BE-NEXT: .cfi_offset f28, -32
-; BE-NEXT: .cfi_offset f29, -24
-; BE-NEXT: .cfi_offset f30, -16
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f25, 248(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f25, f1
; BE-NEXT: fmr f1, f2
@@ -614,44 +559,24 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -240(r1)
-; CHECK-NEXT: std r0, 256(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 240
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r24, -120
-; CHECK-NEXT: .cfi_offset r25, -112
-; CHECK-NEXT: .cfi_offset r26, -104
-; CHECK-NEXT: .cfi_offset r27, -96
-; CHECK-NEXT: .cfi_offset r28, -88
-; CHECK-NEXT: .cfi_offset r29, -80
-; CHECK-NEXT: .cfi_offset r30, -72
-; CHECK-NEXT: .cfi_offset f25, -56
-; CHECK-NEXT: .cfi_offset f26, -48
-; CHECK-NEXT: .cfi_offset f27, -40
-; CHECK-NEXT: .cfi_offset f28, -32
-; CHECK-NEXT: .cfi_offset f29, -24
-; CHECK-NEXT: .cfi_offset f30, -16
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v28, -192
-; CHECK-NEXT: .cfi_offset v29, -176
-; CHECK-NEXT: .cfi_offset v30, -160
-; CHECK-NEXT: .cfi_offset v31, -144
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 256(r1)
; CHECK-NEXT: std r24, 120(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r25, 128(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r26, 136(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r27, 144(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r28, 152(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r29, 160(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r30, 168(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: std r30, 168(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f25, 184(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f25, f2
; CHECK-NEXT: stfd f26, 192(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f26, f3
; CHECK-NEXT: stfd f27, 200(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f27, f4
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
; CHECK-NEXT: stfd f28, 208(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f28, f5
@@ -659,11 +584,11 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
; CHECK-NEXT: fmr f29, f6
; CHECK-NEXT: stfd f30, 224(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f30, f7
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: stfd f31, 232(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f8
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f25
@@ -766,7 +691,7 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
; CHECK-NEXT: vmr v4, v29
; CHECK-NEXT: lfd f30, 224(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f29, 216(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
; CHECK-NEXT: lfd f28, 208(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f27, 200(r1) # 8-byte Folded Reload
@@ -774,7 +699,7 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
; CHECK-NEXT: lfd f25, 184(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r30, 168(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 160(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: ld r28, 152(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r27, 144(r1) # 8-byte Folded Reload
@@ -782,9 +707,9 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
; CHECK-NEXT: ld r26, 136(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r25, 128(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r24, 120(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 240
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -793,16 +718,6 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
; FAST-LABEL: llrint_v8i64_v8f16:
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
-; FAST-NEXT: .cfi_def_cfa_offset 96
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset f24, -64
-; FAST-NEXT: .cfi_offset f25, -56
-; FAST-NEXT: .cfi_offset f26, -48
-; FAST-NEXT: .cfi_offset f27, -40
-; FAST-NEXT: .cfi_offset f28, -32
-; FAST-NEXT: .cfi_offset f29, -24
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
; FAST-NEXT: stfd f24, -64(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f25, -56(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f26, -48(r1) # 8-byte Folded Spill
@@ -920,44 +835,12 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
}
declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>)
-define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
+define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) nounwind {
; BE-LABEL: llrint_v16i64_v16f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -496(r1)
; BE-NEXT: std r0, 512(r1)
-; BE-NEXT: .cfi_def_cfa_offset 496
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r16, -248
-; BE-NEXT: .cfi_offset r17, -240
-; BE-NEXT: .cfi_offset r18, -232
-; BE-NEXT: .cfi_offset r19, -224
-; BE-NEXT: .cfi_offset r20, -216
-; BE-NEXT: .cfi_offset r21, -208
-; BE-NEXT: .cfi_offset r22, -200
-; BE-NEXT: .cfi_offset r23, -192
-; BE-NEXT: .cfi_offset r24, -184
-; BE-NEXT: .cfi_offset r25, -176
-; BE-NEXT: .cfi_offset r26, -168
-; BE-NEXT: .cfi_offset r27, -160
-; BE-NEXT: .cfi_offset r28, -152
-; BE-NEXT: .cfi_offset r29, -144
-; BE-NEXT: .cfi_offset r30, -136
-; BE-NEXT: .cfi_offset f17, -120
-; BE-NEXT: .cfi_offset f18, -112
-; BE-NEXT: .cfi_offset f19, -104
-; BE-NEXT: .cfi_offset f20, -96
-; BE-NEXT: .cfi_offset f21, -88
-; BE-NEXT: .cfi_offset f22, -80
-; BE-NEXT: .cfi_offset f23, -72
-; BE-NEXT: .cfi_offset f24, -64
-; BE-NEXT: .cfi_offset f25, -56
-; BE-NEXT: .cfi_offset f26, -48
-; BE-NEXT: .cfi_offset f27, -40
-; BE-NEXT: .cfi_offset f28, -32
-; BE-NEXT: .cfi_offset f29, -24
-; BE-NEXT: .cfi_offset f30, -16
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f20, 400(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f20, f1
; BE-NEXT: fmr f1, f2
@@ -1244,105 +1127,65 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -432(r1)
-; CHECK-NEXT: std r0, 448(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 432
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r16, -248
-; CHECK-NEXT: .cfi_offset r17, -240
-; CHECK-NEXT: .cfi_offset r18, -232
-; CHECK-NEXT: .cfi_offset r19, -224
-; CHECK-NEXT: .cfi_offset r20, -216
-; CHECK-NEXT: .cfi_offset r21, -208
-; CHECK-NEXT: .cfi_offset r22, -200
-; CHECK-NEXT: .cfi_offset r23, -192
-; CHECK-NEXT: .cfi_offset r24, -184
-; CHECK-NEXT: .cfi_offset r25, -176
-; CHECK-NEXT: .cfi_offset r26, -168
-; CHECK-NEXT: .cfi_offset r27, -160
-; CHECK-NEXT: .cfi_offset r28, -152
-; CHECK-NEXT: .cfi_offset r29, -144
-; CHECK-NEXT: .cfi_offset r30, -136
-; CHECK-NEXT: .cfi_offset f17, -120
-; CHECK-NEXT: .cfi_offset f18, -112
-; CHECK-NEXT: .cfi_offset f19, -104
-; CHECK-NEXT: .cfi_offset f20, -96
-; CHECK-NEXT: .cfi_offset f21, -88
-; CHECK-NEXT: .cfi_offset f22, -80
-; CHECK-NEXT: .cfi_offset f23, -72
-; CHECK-NEXT: .cfi_offset f24, -64
-; CHECK-NEXT: .cfi_offset f25, -56
-; CHECK-NEXT: .cfi_offset f26, -48
-; CHECK-NEXT: .cfi_offset f27, -40
-; CHECK-NEXT: .cfi_offset f28, -32
-; CHECK-NEXT: .cfi_offset f29, -24
-; CHECK-NEXT: .cfi_offset f30, -16
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v24, -384
-; CHECK-NEXT: .cfi_offset v25, -368
-; CHECK-NEXT: .cfi_offset v26, -352
-; CHECK-NEXT: .cfi_offset v27, -336
-; CHECK-NEXT: .cfi_offset v28, -320
-; CHECK-NEXT: .cfi_offset v29, -304
-; CHECK-NEXT: .cfi_offset v30, -288
-; CHECK-NEXT: .cfi_offset v31, -272
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 448(r1)
; CHECK-NEXT: std r16, 184(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r17, 192(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r18, 200(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r19, 208(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r20, 216(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r21, 224(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r22, 232(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v24, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: std r22, 232(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r23, 240(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r24, 248(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r25, 256(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r26, 264(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r27, 272(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r28, 280(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v25, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: std r28, 280(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r29, 288(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r30, 296(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f17, 312(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f18, 320(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f19, 328(r1) # 8-byte Folded Spill
+; CHECK-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 96
; CHECK-NEXT: stfd f20, 336(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f20, f2
-; CHECK-NEXT: stvx v26, r1, r3 # 16-byte Folded Spill
-; CHECK-NEXT: li r3, 96
; CHECK-NEXT: stfd f21, 344(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f21, f3
; CHECK-NEXT: stfd f22, 352(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f22, f4
+; CHECK-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 112
; CHECK-NEXT: stfd f23, 360(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f23, f5
-; CHECK-NEXT: stvx v27, r1, r3 # 16-byte Folded Spill
-; CHECK-NEXT: li r3, 112
; CHECK-NEXT: stfd f24, 368(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f24, f6
; CHECK-NEXT: stfd f25, 376(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f25, f7
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 128
; CHECK-NEXT: stfd f26, 384(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f26, f8
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
-; CHECK-NEXT: li r3, 128
; CHECK-NEXT: stfd f27, 392(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f27, f9
; CHECK-NEXT: stfd f28, 400(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f28, f10
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 144
; CHECK-NEXT: stfd f29, 408(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f29, f11
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
-; CHECK-NEXT: li r3, 144
; CHECK-NEXT: stfd f30, 416(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f30, f12
; CHECK-NEXT: stfd f31, 424(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f13
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 160
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f20
@@ -1545,7 +1388,7 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: vmr v4, v29
; CHECK-NEXT: lfd f30, 416(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f29, 408(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 144
; CHECK-NEXT: vmr v5, v28
; CHECK-NEXT: vmr v6, v27
@@ -1553,7 +1396,7 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: vmr v8, v25
; CHECK-NEXT: lfd f28, 400(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f27, 392(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 128
; CHECK-NEXT: lfd f26, 384(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f25, 376(r1) # 8-byte Folded Reload
@@ -1561,7 +1404,7 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: lfd f24, 368(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f23, 360(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f22, 352(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 112
; CHECK-NEXT: lfd f21, 344(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r30, 296(r1) # 8-byte Folded Reload
@@ -1569,7 +1412,7 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: lfd f19, 328(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 288(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r28, 280(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: lfd f18, 320(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r27, 272(r1) # 8-byte Folded Reload
@@ -1577,7 +1420,7 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: ld r26, 264(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r25, 256(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r24, 248(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
; CHECK-NEXT: ld r23, 240(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r22, 232(r1) # 8-byte Folded Reload
@@ -1585,13 +1428,13 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: ld r20, 216(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r19, 208(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r18, 200(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: ld r17, 192(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r16, 184(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 432
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -1600,24 +1443,6 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; FAST-LABEL: llrint_v16i64_v16f16:
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
-; FAST-NEXT: .cfi_def_cfa_offset 160
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset f16, -128
-; FAST-NEXT: .cfi_offset f17, -120
-; FAST-NEXT: .cfi_offset f18, -112
-; FAST-NEXT: .cfi_offset f19, -104
-; FAST-NEXT: .cfi_offset f20, -96
-; FAST-NEXT: .cfi_offset f21, -88
-; FAST-NEXT: .cfi_offset f22, -80
-; FAST-NEXT: .cfi_offset f23, -72
-; FAST-NEXT: .cfi_offset f24, -64
-; FAST-NEXT: .cfi_offset f25, -56
-; FAST-NEXT: .cfi_offset f26, -48
-; FAST-NEXT: .cfi_offset f27, -40
-; FAST-NEXT: .cfi_offset f28, -32
-; FAST-NEXT: .cfi_offset f29, -24
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
; FAST-NEXT: stfd f16, -128(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f17, -120(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f18, -112(r1) # 8-byte Folded Spill
@@ -1841,50 +1666,12 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
}
declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>)
-define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
+define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) nounwind {
; BE-LABEL: llrint_v32i64_v32f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -864(r1)
; BE-NEXT: std r0, 880(r1)
-; BE-NEXT: .cfi_def_cfa_offset 864
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r14, -288
-; BE-NEXT: .cfi_offset r15, -280
-; BE-NEXT: .cfi_offset r16, -272
-; BE-NEXT: .cfi_offset r17, -264
-; BE-NEXT: .cfi_offset r18, -256
-; BE-NEXT: .cfi_offset r19, -248
-; BE-NEXT: .cfi_offset r20, -240
-; BE-NEXT: .cfi_offset r21, -232
-; BE-NEXT: .cfi_offset r22, -224
-; BE-NEXT: .cfi_offset r23, -216
-; BE-NEXT: .cfi_offset r24, -208
-; BE-NEXT: .cfi_offset r25, -200
-; BE-NEXT: .cfi_offset r26, -192
-; BE-NEXT: .cfi_offset r27, -184
-; BE-NEXT: .cfi_offset r28, -176
-; BE-NEXT: .cfi_offset r29, -168
-; BE-NEXT: .cfi_offset r30, -160
-; BE-NEXT: .cfi_offset r31, -152
-; BE-NEXT: .cfi_offset f14, -144
-; BE-NEXT: .cfi_offset f15, -136
-; BE-NEXT: .cfi_offset f16, -128
-; BE-NEXT: .cfi_offset f17, -120
-; BE-NEXT: .cfi_offset f18, -112
-; BE-NEXT: .cfi_offset f19, -104
-; BE-NEXT: .cfi_offset f20, -96
-; BE-NEXT: .cfi_offset f21, -88
-; BE-NEXT: .cfi_offset f22, -80
-; BE-NEXT: .cfi_offset f23, -72
-; BE-NEXT: .cfi_offset f24, -64
-; BE-NEXT: .cfi_offset f25, -56
-; BE-NEXT: .cfi_offset f26, -48
-; BE-NEXT: .cfi_offset f27, -40
-; BE-NEXT: .cfi_offset f28, -32
-; BE-NEXT: .cfi_offset f29, -24
-; BE-NEXT: .cfi_offset f30, -16
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f20, 768(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f20, f1
; BE-NEXT: fmr f1, f2
@@ -1924,6 +1711,7 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; BE-NEXT: stfd f30, 848(r1) # 8-byte Folded Spill
; BE-NEXT: stfd f31, 856(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f31, f13
+; BE-NEXT: mr r30, r3
; BE-NEXT: fmr f29, f12
; BE-NEXT: fmr f30, f11
; BE-NEXT: fmr f28, f10
@@ -1934,7 +1722,6 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; BE-NEXT: fmr f23, f5
; BE-NEXT: fmr f22, f4
; BE-NEXT: fmr f21, f3
-; BE-NEXT: mr r30, r3
; BE-NEXT: bl __truncsfhf2
; BE-NEXT: nop
; BE-NEXT: fmr f1, f20
@@ -2437,98 +2224,48 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -688(r1)
-; CHECK-NEXT: std r0, 704(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 688
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r14, -288
-; CHECK-NEXT: .cfi_offset r15, -280
-; CHECK-NEXT: .cfi_offset r16, -272
-; CHECK-NEXT: .cfi_offset r17, -264
-; CHECK-NEXT: .cfi_offset r18, -256
-; CHECK-NEXT: .cfi_offset r19, -248
-; CHECK-NEXT: .cfi_offset r20, -240
-; CHECK-NEXT: .cfi_offset r21, -232
-; CHECK-NEXT: .cfi_offset r22, -224
-; CHECK-NEXT: .cfi_offset r23, -216
-; CHECK-NEXT: .cfi_offset r24, -208
-; CHECK-NEXT: .cfi_offset r25, -200
-; CHECK-NEXT: .cfi_offset r26, -192
-; CHECK-NEXT: .cfi_offset r27, -184
-; CHECK-NEXT: .cfi_offset r28, -176
-; CHECK-NEXT: .cfi_offset r29, -168
-; CHECK-NEXT: .cfi_offset r30, -160
-; CHECK-NEXT: .cfi_offset r31, -152
-; CHECK-NEXT: .cfi_offset f14, -144
-; CHECK-NEXT: .cfi_offset f15, -136
-; CHECK-NEXT: .cfi_offset f16, -128
-; CHECK-NEXT: .cfi_offset f17, -120
-; CHECK-NEXT: .cfi_offset f18, -112
-; CHECK-NEXT: .cfi_offset f19, -104
-; CHECK-NEXT: .cfi_offset f20, -96
-; CHECK-NEXT: .cfi_offset f21, -88
-; CHECK-NEXT: .cfi_offset f22, -80
-; CHECK-NEXT: .cfi_offset f23, -72
-; CHECK-NEXT: .cfi_offset f24, -64
-; CHECK-NEXT: .cfi_offset f25, -56
-; CHECK-NEXT: .cfi_offset f26, -48
-; CHECK-NEXT: .cfi_offset f27, -40
-; CHECK-NEXT: .cfi_offset f28, -32
-; CHECK-NEXT: .cfi_offset f29, -24
-; CHECK-NEXT: .cfi_offset f30, -16
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v20, -480
-; CHECK-NEXT: .cfi_offset v21, -464
-; CHECK-NEXT: .cfi_offset v22, -448
-; CHECK-NEXT: .cfi_offset v23, -432
-; CHECK-NEXT: .cfi_offset v24, -416
-; CHECK-NEXT: .cfi_offset v25, -400
-; CHECK-NEXT: .cfi_offset v26, -384
-; CHECK-NEXT: .cfi_offset v27, -368
-; CHECK-NEXT: .cfi_offset v28, -352
-; CHECK-NEXT: .cfi_offset v29, -336
-; CHECK-NEXT: .cfi_offset v30, -320
-; CHECK-NEXT: .cfi_offset v31, -304
; CHECK-NEXT: li r4, 208
+; CHECK-NEXT: std r0, 704(r1)
; CHECK-NEXT: std r14, 400(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r15, 408(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r16, 416(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r17, 424(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r18, 432(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r19, 440(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r20, 448(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v20, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v20, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 224
+; CHECK-NEXT: std r20, 448(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r21, 456(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r22, 464(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r23, 472(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r24, 480(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r25, 488(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r26, 496(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v21, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v21, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 240
+; CHECK-NEXT: std r26, 496(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r27, 504(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r28, 512(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r29, 520(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r30, 528(r1) # 8-byte Folded Spill
; CHECK-NEXT: mr r30, r3
-; CHECK-NEXT: std r31, 536(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v22, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v22, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 256
+; CHECK-NEXT: std r31, 536(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f14, 544(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f15, 552(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f16, 560(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f17, 568(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f18, 576(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stfd f19, 584(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v23, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v23, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 272
+; CHECK-NEXT: stfd f19, 584(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f20, 592(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f20, f2
; CHECK-NEXT: stfd f21, 600(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f21, f3
; CHECK-NEXT: stfd f22, 608(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f22, f4
-; CHECK-NEXT: stvx v24, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v24, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 288
; CHECK-NEXT: stfd f23, 616(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f23, f5
@@ -2536,7 +2273,7 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: fmr f24, f6
; CHECK-NEXT: stfd f25, 632(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f25, f7
-; CHECK-NEXT: stvx v25, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v25, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 304
; CHECK-NEXT: stfd f26, 640(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f26, f8
@@ -2544,7 +2281,7 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: fmr f27, f9
; CHECK-NEXT: stfd f28, 656(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f28, f10
-; CHECK-NEXT: stvx v26, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v26, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 320
; CHECK-NEXT: stfd f29, 664(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f29, f11
@@ -2552,15 +2289,15 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: fmr f30, f12
; CHECK-NEXT: stfd f31, 680(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f13
-; CHECK-NEXT: stvx v27, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v27, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 336
-; CHECK-NEXT: stvx v28, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v28, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 352
-; CHECK-NEXT: stvx v29, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 368
-; CHECK-NEXT: stvx v30, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 384
-; CHECK-NEXT: stvx v31, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f20
@@ -3039,7 +2776,7 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: li r3, 384
; CHECK-NEXT: xxswapd vs4, vs4
; CHECK-NEXT: stxvd2x vs4, 0, r30
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 368
; CHECK-NEXT: lfd f31, 680(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f30, 672(r1) # 8-byte Folded Reload
@@ -3057,7 +2794,7 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: lfd f18, 576(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f17, 568(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f16, 560(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 352
; CHECK-NEXT: lfd f15, 552(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f14, 544(r1) # 8-byte Folded Reload
@@ -3065,7 +2802,7 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: ld r30, 528(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 520(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r28, 512(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 336
; CHECK-NEXT: ld r27, 504(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r26, 496(r1) # 8-byte Folded Reload
@@ -3073,7 +2810,7 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: ld r24, 480(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r23, 472(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r22, 464(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 320
; CHECK-NEXT: ld r21, 456(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r20, 448(r1) # 8-byte Folded Reload
@@ -3081,23 +2818,23 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: ld r18, 432(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r17, 424(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r16, 416(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 304
; CHECK-NEXT: ld r15, 408(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r14, 400(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 288
-; CHECK-NEXT: lvx v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 272
-; CHECK-NEXT: lvx v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 256
-; CHECK-NEXT: lvx v23, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 240
-; CHECK-NEXT: lvx v22, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 224
-; CHECK-NEXT: lvx v21, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 208
-; CHECK-NEXT: lvx v20, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 688
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -3107,95 +2844,62 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
; FAST-NEXT: stdu r1, -480(r1)
-; FAST-NEXT: std r0, 496(r1)
-; FAST-NEXT: .cfi_def_cfa_offset 480
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset r30, -160
-; FAST-NEXT: .cfi_offset f14, -144
-; FAST-NEXT: .cfi_offset f15, -136
-; FAST-NEXT: .cfi_offset f16, -128
-; FAST-NEXT: .cfi_offset f17, -120
-; FAST-NEXT: .cfi_offset f18, -112
-; FAST-NEXT: .cfi_offset f19, -104
-; FAST-NEXT: .cfi_offset f20, -96
-; FAST-NEXT: .cfi_offset f21, -88
-; FAST-NEXT: .cfi_offset f22, -80
-; FAST-NEXT: .cfi_offset f23, -72
-; FAST-NEXT: .cfi_offset f24, -64
-; FAST-NEXT: .cfi_offset f25, -56
-; FAST-NEXT: .cfi_offset f26, -48
-; FAST-NEXT: .cfi_offset f27, -40
-; FAST-NEXT: .cfi_offset f28, -32
-; FAST-NEXT: .cfi_offset f29, -24
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
-; FAST-NEXT: .cfi_offset v20, -352
-; FAST-NEXT: .cfi_offset v21, -336
-; FAST-NEXT: .cfi_offset v22, -320
-; FAST-NEXT: .cfi_offset v23, -304
-; FAST-NEXT: .cfi_offset v24, -288
-; FAST-NEXT: .cfi_offset v25, -272
-; FAST-NEXT: .cfi_offset v26, -256
-; FAST-NEXT: .cfi_offset v27, -240
-; FAST-NEXT: .cfi_offset v28, -224
-; FAST-NEXT: .cfi_offset v29, -208
-; FAST-NEXT: .cfi_offset v30, -192
-; FAST-NEXT: .cfi_offset v31, -176
; FAST-NEXT: li r4, 128
+; FAST-NEXT: std r0, 496(r1)
; FAST-NEXT: std r30, 320(r1) # 8-byte Folded Spill
+; FAST-NEXT: mr r30, r3
; FAST-NEXT: stfd f14, 336(r1) # 8-byte Folded Spill
-; FAST-NEXT: fmr f14, f5
; FAST-NEXT: stfd f15, 344(r1) # 8-byte Folded Spill
+; FAST-NEXT: fmr f14, f5
; FAST-NEXT: stfd f16, 352(r1) # 8-byte Folded Spill
-; FAST-NEXT: fmr f16, f4
-; FAST-NEXT: mr r30, r3
-; FAST-NEXT: stvx v20, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v20, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 144
+; FAST-NEXT: fmr f16, f4
; FAST-NEXT: stfd f17, 360(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f18, 368(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f19, 376(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f20, 384(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f21, 392(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f22, 400(r1) # 8-byte Folded Spill
-; FAST-NEXT: stvx v21, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v21, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 160
+; FAST-NEXT: stfd f22, 400(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f23, 408(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f24, 416(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f25, 424(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f26, 432(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f27, 440(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f28, 448(r1) # 8-byte Folded Spill
-; FAST-NEXT: stvx v22, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v22, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 176
; FAST-NEXT: xxlor v22, f3, f3
+; FAST-NEXT: stfd f28, 448(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f29, 456(r1) # 8-byte Folded Spill
; FAST-NEXT: fmr f29, f9
; FAST-NEXT: stfd f30, 464(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f31, 472(r1) # 8-byte Folded Spill
-; FAST-NEXT: stvx v23, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v23, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 192
; FAST-NEXT: xxlor v23, f2, f2
-; FAST-NEXT: stvx v24, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v24, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 208
-; FAST-NEXT: stvx v25, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v25, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 224
; FAST-NEXT: xxlor v25, f13, f13
-; FAST-NEXT: stvx v26, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v26, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 240
; FAST-NEXT: xxlor v26, f12, f12
-; FAST-NEXT: stvx v27, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v27, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 256
; FAST-NEXT: xxlor v27, f11, f11
-; FAST-NEXT: stvx v28, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v28, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 272
; FAST-NEXT: xxlor v28, f10, f10
-; FAST-NEXT: stvx v29, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v29, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 288
; FAST-NEXT: xxlor v29, f8, f8
-; FAST-NEXT: stvx v30, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v30, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 304
; FAST-NEXT: xxlor v30, f7, f7
-; FAST-NEXT: stvx v31, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v31, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 44
; FAST-NEXT: xxlor v31, f6, f6
; FAST-NEXT: stxsspx f1, r1, r4 # 4-byte Folded Spill
@@ -3624,30 +3328,30 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; FAST-NEXT: lfd f16, 352(r1) # 8-byte Folded Reload
; FAST-NEXT: lfd f15, 344(r1) # 8-byte Folded Reload
; FAST-NEXT: lfd f14, 336(r1) # 8-byte Folded Reload
-; FAST-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 288
; FAST-NEXT: ld r30, 320(r1) # 8-byte Folded Reload
-; FAST-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 272
-; FAST-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 256
-; FAST-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 240
-; FAST-NEXT: lvx v27, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 224
-; FAST-NEXT: lvx v26, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 208
-; FAST-NEXT: lvx v25, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 192
-; FAST-NEXT: lvx v24, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 176
-; FAST-NEXT: lvx v23, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 160
-; FAST-NEXT: lvx v22, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 144
-; FAST-NEXT: lvx v21, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 128
-; FAST-NEXT: lvx v20, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: addi r1, r1, 480
; FAST-NEXT: ld r0, 16(r1)
; FAST-NEXT: mtlr r0
@@ -3657,14 +3361,12 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
}
declare <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half>)
-define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
+define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) nounwind {
; BE-LABEL: llrint_v1i64_v1f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -112(r1)
; BE-NEXT: std r0, 128(r1)
-; BE-NEXT: .cfi_def_cfa_offset 112
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: bl llrintf
; BE-NEXT: nop
; BE-NEXT: addi r1, r1, 112
@@ -3677,8 +3379,6 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -32(r1)
; CHECK-NEXT: std r0, 48(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: bl llrintf
; CHECK-NEXT: nop
; CHECK-NEXT: addi r1, r1, 32
@@ -3696,15 +3396,13 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
}
declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
-define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
+define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) nounwind {
; BE-LABEL: llrint_v2i64_v2f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -144(r1)
-; BE-NEXT: std r0, 160(r1)
-; BE-NEXT: .cfi_def_cfa_offset 144
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: addi r3, r1, 112
+; BE-NEXT: std r0, 160(r1)
; BE-NEXT: stxvw4x v2, 0, r3
; BE-NEXT: lfs f1, 116(r1)
; BE-NEXT: bl llrintf
@@ -3725,14 +3423,11 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: std r0, 80(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: xxsldwi vs0, v2, v2, 3
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 80(r1)
; CHECK-NEXT: xscvspdpn f1, vs0
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v2
; CHECK-NEXT: bl llrintf
; CHECK-NEXT: nop
@@ -3744,7 +3439,7 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: li r3, 48
; CHECK-NEXT: xxmrghd v2, vs0, v31
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 64
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -3769,15 +3464,13 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
}
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
-define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
+define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) nounwind {
; BE-LABEL: llrint_v4i64_v4f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -160(r1)
-; BE-NEXT: std r0, 176(r1)
-; BE-NEXT: .cfi_def_cfa_offset 160
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: addi r3, r1, 112
+; BE-NEXT: std r0, 176(r1)
; BE-NEXT: stxvw4x v2, 0, r3
; BE-NEXT: lfs f1, 116(r1)
; BE-NEXT: bl llrintf
@@ -3808,17 +3501,13 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -80(r1)
-; CHECK-NEXT: std r0, 96(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: xxsldwi vs0, v2, v2, 3
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 96(r1)
; CHECK-NEXT: xscvspdpn f1, vs0
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v2
; CHECK-NEXT: bl llrintf
; CHECK-NEXT: nop
@@ -3841,9 +3530,9 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: vmr v2, v30
; CHECK-NEXT: xxmrghd v3, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 80
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -3879,15 +3568,13 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
}
declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
-define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
+define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) nounwind {
; BE-LABEL: llrint_v8i64_v8f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -208(r1)
-; BE-NEXT: std r0, 224(r1)
-; BE-NEXT: .cfi_def_cfa_offset 208
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: addi r3, r1, 112
+; BE-NEXT: std r0, 224(r1)
; BE-NEXT: stxvw4x v2, 0, r3
; BE-NEXT: addi r3, r1, 128
; BE-NEXT: stxvw4x v3, 0, r3
@@ -3940,24 +3627,18 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -112(r1)
-; CHECK-NEXT: std r0, 128(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 112
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v28, -64
-; CHECK-NEXT: .cfi_offset v29, -48
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
; CHECK-NEXT: xxsldwi vs0, v2, v2, 3
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: std r0, 128(r1)
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: xscvspdpn f1, vs0
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: vmr v30, v2
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v3
; CHECK-NEXT: bl llrintf
; CHECK-NEXT: nop
@@ -4003,13 +3684,13 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; CHECK-NEXT: vmr v2, v29
; CHECK-NEXT: vmr v4, v28
; CHECK-NEXT: xxmrghd v5, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 112
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4067,15 +3748,13 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
}
declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
-define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
+define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) nounwind {
; BE-LABEL: llrint_v16i64_v16f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -304(r1)
-; BE-NEXT: std r0, 320(r1)
-; BE-NEXT: .cfi_def_cfa_offset 304
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: addi r3, r1, 112
+; BE-NEXT: std r0, 320(r1)
; BE-NEXT: stxvw4x v2, 0, r3
; BE-NEXT: addi r3, r1, 128
; BE-NEXT: stxvw4x v3, 0, r3
@@ -4172,38 +3851,28 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -176(r1)
-; CHECK-NEXT: std r0, 192(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 176
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v24, -128
-; CHECK-NEXT: .cfi_offset v25, -112
-; CHECK-NEXT: .cfi_offset v26, -96
-; CHECK-NEXT: .cfi_offset v27, -80
-; CHECK-NEXT: .cfi_offset v28, -64
-; CHECK-NEXT: .cfi_offset v29, -48
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 192(r1)
; CHECK-NEXT: xxsldwi vs0, v2, v2, 3
-; CHECK-NEXT: stvx v24, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: xscvspdpn f1, vs0
-; CHECK-NEXT: stvx v25, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: stvx v26, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: vmr v26, v3
-; CHECK-NEXT: stvx v27, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 112
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 128
; CHECK-NEXT: vmr v28, v4
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 144
; CHECK-NEXT: vmr v29, v2
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 160
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v5
; CHECK-NEXT: bl llrintf
; CHECK-NEXT: nop
@@ -4295,21 +3964,21 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; CHECK-NEXT: vmr v6, v25
; CHECK-NEXT: vmr v8, v24
; CHECK-NEXT: xxmrghd v9, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 144
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 128
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 112
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 96
-; CHECK-NEXT: lvx v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: lvx v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
-; CHECK-NEXT: lvx v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 176
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4411,14 +4080,12 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
}
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
-define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
+define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) nounwind {
; BE-LABEL: llrint_v1i64_v1f64:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -112(r1)
; BE-NEXT: std r0, 128(r1)
-; BE-NEXT: .cfi_def_cfa_offset 112
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: bl llrint
; BE-NEXT: nop
; BE-NEXT: addi r1, r1, 112
@@ -4431,8 +4098,6 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -32(r1)
; CHECK-NEXT: std r0, 48(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: bl llrint
; CHECK-NEXT: nop
; CHECK-NEXT: addi r1, r1, 32
@@ -4450,16 +4115,13 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
}
declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
-define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
+define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) nounwind {
; BE-LABEL: llrint_v2i64_v2f64:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -160(r1)
-; BE-NEXT: std r0, 176(r1)
-; BE-NEXT: .cfi_def_cfa_offset 160
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset v31, -16
; BE-NEXT: li r3, 144
+; BE-NEXT: std r0, 176(r1)
; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; BE-NEXT: vmr v31, v2
; BE-NEXT: xxlor f1, v31, v31
@@ -4483,12 +4145,9 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: std r0, 80(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: std r0, 80(r1)
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v2
; CHECK-NEXT: xxlor f1, v31, v31
; CHECK-NEXT: bl llrint
@@ -4500,7 +4159,7 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: li r3, 48
; CHECK-NEXT: xxmrghd v2, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 64
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4523,17 +4182,13 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
}
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
-define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
+define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) nounwind {
; BE-LABEL: llrint_v4i64_v4f64:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -192(r1)
-; BE-NEXT: std r0, 208(r1)
-; BE-NEXT: .cfi_def_cfa_offset 192
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset v30, -32
-; BE-NEXT: .cfi_offset v31, -16
; BE-NEXT: li r3, 160
+; BE-NEXT: std r0, 208(r1)
; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; BE-NEXT: vmr v30, v2
; BE-NEXT: li r3, 176
@@ -4572,17 +4227,13 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -80(r1)
-; CHECK-NEXT: std r0, 96(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: std r0, 96(r1)
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v30, v2
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: xxlor f1, v30, v30
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v3
; CHECK-NEXT: bl llrint
; CHECK-NEXT: nop
@@ -4603,9 +4254,9 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: vmr v2, v30
; CHECK-NEXT: xxmrghd v3, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 80
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4637,25 +4288,19 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
}
declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
-define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
+define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) nounwind {
; BE-LABEL: llrint_v8i64_v8f64:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -256(r1)
-; BE-NEXT: std r0, 272(r1)
-; BE-NEXT: .cfi_def_cfa_offset 256
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset v28, -64
-; BE-NEXT: .cfi_offset v29, -48
-; BE-NEXT: .cfi_offset v30, -32
-; BE-NEXT: .cfi_offset v31, -16
; BE-NEXT: li r3, 192
+; BE-NEXT: std r0, 272(r1)
; BE-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; BE-NEXT: li r3, 208
; BE-NEXT: vmr v28, v2
-; BE-NEXT: xxlor f1, v28, v28
; BE-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; BE-NEXT: li r3, 224
+; BE-NEXT: xxlor f1, v28, v28
; BE-NEXT: vmr v29, v3
; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; BE-NEXT: li r3, 240
@@ -4718,25 +4363,19 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -112(r1)
-; CHECK-NEXT: std r0, 128(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 112
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v28, -64
-; CHECK-NEXT: .cfi_offset v29, -48
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: std r0, 128(r1)
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: vmr v28, v2
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
; CHECK-NEXT: xxlor f1, v28, v28
; CHECK-NEXT: vmr v29, v3
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: vmr v30, v4
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v5
; CHECK-NEXT: bl llrint
; CHECK-NEXT: nop
@@ -4777,13 +4416,13 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; CHECK-NEXT: vmr v3, v29
; CHECK-NEXT: vmr v2, v28
; CHECK-NEXT: xxmrghd v5, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 112
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4832,3 +4471,536 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
+
+define <1 x i64> @llrint_v1i64_v1f128(<1 x fp128> %x) nounwind {
+; BE-LABEL: llrint_v1i64_v1f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: llrint_v1i64_v1f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -32(r1)
+; CHECK-NEXT: std r0, 48(r1)
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: addi r1, r1, 32
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: llrint_v1i64_v1f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -32(r1)
+; FAST-NEXT: std r0, 48(r1)
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: addi r1, r1, 32
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <1 x i64> @llvm.llrint.v1i64.v1f128(<1 x fp128> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.llrint.v1i64.v1f128(<1 x fp128>)
+
+define <2 x i64> @llrint_v2i64_v2f128(<2 x fp128> %x) nounwind {
+; BE-LABEL: llrint_v2i64_v2f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -160(r1)
+; BE-NEXT: li r3, 144
+; BE-NEXT: std r0, 176(r1)
+; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: vmr v31, v2
+; BE-NEXT: vmr v2, v3
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v31
+; BE-NEXT: std r3, 136(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 128(r1)
+; BE-NEXT: addi r3, r1, 128
+; BE-NEXT: lxvd2x v2, 0, r3
+; BE-NEXT: li r3, 144
+; BE-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: addi r1, r1, 160
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: llrint_v2i64_v2f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -80(r1)
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 96(r1)
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: vmr v31, v3
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v31
+; CHECK-NEXT: mtvsrd v30, r3
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: xxmrghd v2, vs0, v30
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 80
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: llrint_v2i64_v2f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -80(r1)
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: std r0, 96(r1)
+; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: vmr v31, v3
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v31
+; FAST-NEXT: mtvsrd v30, r3
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: xxmrghd v2, vs0, v30
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: addi r1, r1, 80
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <2 x i64> @llvm.llrint.v2i64.v2f128(<2 x fp128> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.llrint.v2i64.v2f128(<2 x fp128>)
+
+define <4 x i64> @llrint_v4i64_v4f128(<4 x fp128> %x) nounwind {
+; BE-LABEL: llrint_v4i64_v4f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -208(r1)
+; BE-NEXT: li r3, 160
+; BE-NEXT: std r0, 224(r1)
+; BE-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 176
+; BE-NEXT: vmr v29, v2
+; BE-NEXT: vmr v2, v3
+; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 192
+; BE-NEXT: vmr v30, v4
+; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: vmr v31, v5
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v29
+; BE-NEXT: std r3, 136(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v31
+; BE-NEXT: std r3, 128(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v30
+; BE-NEXT: std r3, 152(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 144(r1)
+; BE-NEXT: addi r3, r1, 128
+; BE-NEXT: lxvd2x v2, 0, r3
+; BE-NEXT: addi r3, r1, 144
+; BE-NEXT: lxvd2x v3, 0, r3
+; BE-NEXT: li r3, 192
+; BE-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 176
+; BE-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 160
+; BE-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: addi r1, r1, 208
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: llrint_v4i64_v4f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -112(r1)
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 128(r1)
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: vmr v29, v3
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: vmr v30, v4
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: vmr v31, v5
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v29
+; CHECK-NEXT: mtvsrd v28, r3
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v30
+; CHECK-NEXT: xxmrghd v29, vs0, v28
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v31
+; CHECK-NEXT: mtvsrd v30, r3
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: vmr v2, v29
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: xxmrghd v3, vs0, v30
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 112
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: llrint_v4i64_v4f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -112(r1)
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: std r0, 128(r1)
+; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: vmr v29, v3
+; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: vmr v30, v4
+; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: vmr v31, v5
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v29
+; FAST-NEXT: mtvsrd v28, r3
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v30
+; FAST-NEXT: xxmrghd v29, vs0, v28
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v31
+; FAST-NEXT: mtvsrd v30, r3
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: vmr v2, v29
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: xxmrghd v3, vs0, v30
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: addi r1, r1, 112
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <4 x i64> @llvm.llrint.v4i64.v4f128(<4 x fp128> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.llrint.v4i64.v4f128(<4 x fp128>)
+
+define <8 x i64> @llrint_v8i64_v8f128(<8 x fp128> %x) nounwind {
+; BE-LABEL: llrint_v8i64_v8f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -304(r1)
+; BE-NEXT: li r3, 192
+; BE-NEXT: std r0, 320(r1)
+; BE-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 208
+; BE-NEXT: vmr v25, v2
+; BE-NEXT: vmr v2, v3
+; BE-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 224
+; BE-NEXT: vmr v26, v4
+; BE-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 240
+; BE-NEXT: vmr v27, v5
+; BE-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 256
+; BE-NEXT: vmr v28, v6
+; BE-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 272
+; BE-NEXT: vmr v29, v7
+; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 288
+; BE-NEXT: vmr v30, v8
+; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: vmr v31, v9
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v25
+; BE-NEXT: std r3, 136(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v27
+; BE-NEXT: std r3, 128(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v26
+; BE-NEXT: std r3, 152(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v29
+; BE-NEXT: std r3, 144(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v28
+; BE-NEXT: std r3, 168(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v31
+; BE-NEXT: std r3, 160(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v30
+; BE-NEXT: std r3, 184(r1)
+; BE-NEXT: bl llrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 176(r1)
+; BE-NEXT: addi r3, r1, 128
+; BE-NEXT: lxvd2x v2, 0, r3
+; BE-NEXT: addi r3, r1, 144
+; BE-NEXT: lxvd2x v3, 0, r3
+; BE-NEXT: addi r3, r1, 160
+; BE-NEXT: lxvd2x v4, 0, r3
+; BE-NEXT: addi r3, r1, 176
+; BE-NEXT: lxvd2x v5, 0, r3
+; BE-NEXT: li r3, 288
+; BE-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 272
+; BE-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 256
+; BE-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 240
+; BE-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 224
+; BE-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 208
+; BE-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 192
+; BE-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: addi r1, r1, 304
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: llrint_v8i64_v8f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -176(r1)
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 192(r1)
+; CHECK-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: vmr v25, v3
+; CHECK-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: vmr v26, v4
+; CHECK-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 112
+; CHECK-NEXT: vmr v27, v5
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 128
+; CHECK-NEXT: vmr v28, v6
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 144
+; CHECK-NEXT: vmr v29, v7
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 160
+; CHECK-NEXT: vmr v30, v8
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: vmr v31, v9
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v25
+; CHECK-NEXT: mtvsrd v24, r3
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v26
+; CHECK-NEXT: xxmrghd v25, vs0, v24
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v27
+; CHECK-NEXT: mtvsrd v26, r3
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v28
+; CHECK-NEXT: xxmrghd v27, vs0, v26
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v29
+; CHECK-NEXT: mtvsrd v28, r3
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v30
+; CHECK-NEXT: xxmrghd v29, vs0, v28
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v31
+; CHECK-NEXT: mtvsrd v30, r3
+; CHECK-NEXT: bl llrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: li r3, 160
+; CHECK-NEXT: vmr v4, v29
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 144
+; CHECK-NEXT: vmr v3, v27
+; CHECK-NEXT: vmr v2, v25
+; CHECK-NEXT: xxmrghd v5, vs0, v30
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 128
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 112
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 176
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: llrint_v8i64_v8f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -176(r1)
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: std r0, 192(r1)
+; FAST-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: vmr v25, v3
+; FAST-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: vmr v26, v4
+; FAST-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 112
+; FAST-NEXT: vmr v27, v5
+; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 128
+; FAST-NEXT: vmr v28, v6
+; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 144
+; FAST-NEXT: vmr v29, v7
+; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 160
+; FAST-NEXT: vmr v30, v8
+; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: vmr v31, v9
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v25
+; FAST-NEXT: mtvsrd v24, r3
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v26
+; FAST-NEXT: xxmrghd v25, vs0, v24
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v27
+; FAST-NEXT: mtvsrd v26, r3
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v28
+; FAST-NEXT: xxmrghd v27, vs0, v26
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v29
+; FAST-NEXT: mtvsrd v28, r3
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v30
+; FAST-NEXT: xxmrghd v29, vs0, v28
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v31
+; FAST-NEXT: mtvsrd v30, r3
+; FAST-NEXT: bl llrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: li r3, 160
+; FAST-NEXT: vmr v4, v29
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 144
+; FAST-NEXT: vmr v3, v27
+; FAST-NEXT: vmr v2, v25
+; FAST-NEXT: xxmrghd v5, vs0, v30
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 128
+; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 112
+; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: addi r1, r1, 176
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <8 x i64> @llvm.llrint.v8i64.v8f128(<8 x fp128> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.llrint.v8i64.v8f128(<8 x fp128>)
diff --git a/llvm/test/CodeGen/PowerPC/vector-lrint.ll b/llvm/test/CodeGen/PowerPC/vector-lrint.ll
index c2576d4..f437536 100644
--- a/llvm/test/CodeGen/PowerPC/vector-lrint.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-lrint.ll
@@ -1,4 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; FIXME: crash "Input type needs to be promoted!"
+; SKIP: sed 's/iXLen/i32/g' %s | llc -ppc-asm-full-reg-names \
+; SKIP: -ppc-vsr-nums-as-vr -mtriple=powerpc-unknown-unknown \
+; SKIP: -verify-machineinstrs | FileCheck %s --check-prefixes=PPC32
; RUN: sed 's/iXLen/i32/g' %s | llc -mcpu=pwr7 -ppc-asm-full-reg-names \
; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64-unknown-unknown \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=BE
@@ -9,6 +13,10 @@
; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64le-unknown-unknown \
; RUN: -verify-machineinstrs --enable-unsafe-fp-math | \
; RUN: FileCheck %s --check-prefixes=FAST
+; FIXME: crash "Input type needs to be promoted!"
+; SKIP: sed 's/iXLen/i64/g' %s | llc -ppc-asm-full-reg-names \
+; SKIP: -ppc-vsr-nums-as-vr -mtriple=powerpc-unknown-unknown \
+; SKIP: -verify-machineinstrs | FileCheck %s --check-prefixes=PPC32
; RUN: sed 's/iXLen/i64/g' %s | llc -mcpu=pwr7 -ppc-asm-full-reg-names \
; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64-unknown-unknown \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=BE
@@ -20,14 +28,12 @@
; RUN: -verify-machineinstrs --enable-unsafe-fp-math | \
; RUN: FileCheck %s --check-prefixes=FAST
-define <1 x i64> @lrint_v1f16(<1 x half> %x) {
+define <1 x i64> @lrint_v1f16(<1 x half> %x) nounwind {
; BE-LABEL: lrint_v1f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -112(r1)
; BE-NEXT: std r0, 128(r1)
-; BE-NEXT: .cfi_def_cfa_offset 112
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: bl __truncsfhf2
; BE-NEXT: nop
; BE-NEXT: clrldi r3, r3, 48
@@ -45,8 +51,6 @@ define <1 x i64> @lrint_v1f16(<1 x half> %x) {
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -32(r1)
; CHECK-NEXT: std r0, 48(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: clrldi r3, r3, 48
@@ -64,8 +68,6 @@ define <1 x i64> @lrint_v1f16(<1 x half> %x) {
; FAST-NEXT: mflr r0
; FAST-NEXT: stdu r1, -32(r1)
; FAST-NEXT: std r0, 48(r1)
-; FAST-NEXT: .cfi_def_cfa_offset 32
-; FAST-NEXT: .cfi_offset lr, 16
; FAST-NEXT: bl __truncsfhf2
; FAST-NEXT: nop
; FAST-NEXT: clrldi r3, r3, 48
@@ -82,16 +84,12 @@ define <1 x i64> @lrint_v1f16(<1 x half> %x) {
}
declare <1 x i64> @llvm.lrint.v1i64.v1f16(<1 x half>)
-define <2 x i64> @lrint_v2f16(<2 x half> %x) {
+define <2 x i64> @lrint_v2f16(<2 x half> %x) nounwind {
; BE-LABEL: lrint_v2f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -160(r1)
; BE-NEXT: std r0, 176(r1)
-; BE-NEXT: .cfi_def_cfa_offset 160
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r30, -24
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f31, 152(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f31, f1
; BE-NEXT: fmr f1, f2
@@ -129,17 +127,12 @@ define <2 x i64> @lrint_v2f16(<2 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -96(r1)
-; CHECK-NEXT: std r0, 112(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 96
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r30, -24
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v31, -48
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 112(r1)
; CHECK-NEXT: std r30, 72(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f31, 88(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f2
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f31
@@ -164,7 +157,7 @@ define <2 x i64> @lrint_v2f16(<2 x half> %x) {
; CHECK-NEXT: lfd f31, 88(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r30, 72(r1) # 8-byte Folded Reload
; CHECK-NEXT: xxmrghd v2, vs0, v31
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 96
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -173,10 +166,6 @@ define <2 x i64> @lrint_v2f16(<2 x half> %x) {
; FAST-LABEL: lrint_v2f16:
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
-; FAST-NEXT: .cfi_def_cfa_offset 48
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
; FAST-NEXT: stdu r1, -48(r1)
@@ -213,20 +202,12 @@ define <2 x i64> @lrint_v2f16(<2 x half> %x) {
}
declare <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half>)
-define <4 x i64> @lrint_v4f16(<4 x half> %x) {
+define <4 x i64> @lrint_v4f16(<4 x half> %x) nounwind {
; BE-LABEL: lrint_v4f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -208(r1)
; BE-NEXT: std r0, 224(r1)
-; BE-NEXT: .cfi_def_cfa_offset 208
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r28, -56
-; BE-NEXT: .cfi_offset r29, -48
-; BE-NEXT: .cfi_offset r30, -40
-; BE-NEXT: .cfi_offset f29, -24
-; BE-NEXT: .cfi_offset f30, -16
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f29, 184(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f29, f1
; BE-NEXT: fmr f1, f2
@@ -300,18 +281,8 @@ define <4 x i64> @lrint_v4f16(<4 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -144(r1)
-; CHECK-NEXT: std r0, 160(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 144
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r28, -56
-; CHECK-NEXT: .cfi_offset r29, -48
-; CHECK-NEXT: .cfi_offset r30, -40
-; CHECK-NEXT: .cfi_offset f29, -24
-; CHECK-NEXT: .cfi_offset f30, -16
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v30, -96
-; CHECK-NEXT: .cfi_offset v31, -80
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 160(r1)
; CHECK-NEXT: std r28, 88(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r29, 96(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r30, 104(r1) # 8-byte Folded Spill
@@ -319,11 +290,11 @@ define <4 x i64> @lrint_v4f16(<4 x half> %x) {
; CHECK-NEXT: fmr f29, f2
; CHECK-NEXT: stfd f30, 128(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f30, f3
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: stfd f31, 136(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f4
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f29
@@ -376,11 +347,11 @@ define <4 x i64> @lrint_v4f16(<4 x half> %x) {
; CHECK-NEXT: lfd f29, 120(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r30, 104(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 96(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
; CHECK-NEXT: ld r28, 88(r1) # 8-byte Folded Reload
; CHECK-NEXT: xxmrghd v3, vs0, v30
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 144
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -389,12 +360,6 @@ define <4 x i64> @lrint_v4f16(<4 x half> %x) {
; FAST-LABEL: lrint_v4f16:
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
-; FAST-NEXT: .cfi_def_cfa_offset 64
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset f28, -32
-; FAST-NEXT: .cfi_offset f29, -24
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
; FAST-NEXT: stfd f28, -32(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f29, -24(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
@@ -458,28 +423,12 @@ define <4 x i64> @lrint_v4f16(<4 x half> %x) {
}
declare <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half>)
-define <8 x i64> @lrint_v8f16(<8 x half> %x) {
+define <8 x i64> @lrint_v8f16(<8 x half> %x) nounwind {
; BE-LABEL: lrint_v8f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -304(r1)
; BE-NEXT: std r0, 320(r1)
-; BE-NEXT: .cfi_def_cfa_offset 304
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r24, -120
-; BE-NEXT: .cfi_offset r25, -112
-; BE-NEXT: .cfi_offset r26, -104
-; BE-NEXT: .cfi_offset r27, -96
-; BE-NEXT: .cfi_offset r28, -88
-; BE-NEXT: .cfi_offset r29, -80
-; BE-NEXT: .cfi_offset r30, -72
-; BE-NEXT: .cfi_offset f25, -56
-; BE-NEXT: .cfi_offset f26, -48
-; BE-NEXT: .cfi_offset f27, -40
-; BE-NEXT: .cfi_offset f28, -32
-; BE-NEXT: .cfi_offset f29, -24
-; BE-NEXT: .cfi_offset f30, -16
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f25, 248(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f25, f1
; BE-NEXT: fmr f1, f2
@@ -625,44 +574,24 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -240(r1)
-; CHECK-NEXT: std r0, 256(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 240
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r24, -120
-; CHECK-NEXT: .cfi_offset r25, -112
-; CHECK-NEXT: .cfi_offset r26, -104
-; CHECK-NEXT: .cfi_offset r27, -96
-; CHECK-NEXT: .cfi_offset r28, -88
-; CHECK-NEXT: .cfi_offset r29, -80
-; CHECK-NEXT: .cfi_offset r30, -72
-; CHECK-NEXT: .cfi_offset f25, -56
-; CHECK-NEXT: .cfi_offset f26, -48
-; CHECK-NEXT: .cfi_offset f27, -40
-; CHECK-NEXT: .cfi_offset f28, -32
-; CHECK-NEXT: .cfi_offset f29, -24
-; CHECK-NEXT: .cfi_offset f30, -16
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v28, -192
-; CHECK-NEXT: .cfi_offset v29, -176
-; CHECK-NEXT: .cfi_offset v30, -160
-; CHECK-NEXT: .cfi_offset v31, -144
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 256(r1)
; CHECK-NEXT: std r24, 120(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r25, 128(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r26, 136(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r27, 144(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r28, 152(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r29, 160(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r30, 168(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: std r30, 168(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f25, 184(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f25, f2
; CHECK-NEXT: stfd f26, 192(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f26, f3
; CHECK-NEXT: stfd f27, 200(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f27, f4
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
; CHECK-NEXT: stfd f28, 208(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f28, f5
@@ -670,11 +599,11 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) {
; CHECK-NEXT: fmr f29, f6
; CHECK-NEXT: stfd f30, 224(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f30, f7
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: stfd f31, 232(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f8
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f25
@@ -777,7 +706,7 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) {
; CHECK-NEXT: vmr v4, v29
; CHECK-NEXT: lfd f30, 224(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f29, 216(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
; CHECK-NEXT: lfd f28, 208(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f27, 200(r1) # 8-byte Folded Reload
@@ -785,7 +714,7 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) {
; CHECK-NEXT: lfd f25, 184(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r30, 168(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 160(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: ld r28, 152(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r27, 144(r1) # 8-byte Folded Reload
@@ -793,9 +722,9 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) {
; CHECK-NEXT: ld r26, 136(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r25, 128(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r24, 120(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 240
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -804,16 +733,6 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) {
; FAST-LABEL: lrint_v8f16:
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
-; FAST-NEXT: .cfi_def_cfa_offset 96
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset f24, -64
-; FAST-NEXT: .cfi_offset f25, -56
-; FAST-NEXT: .cfi_offset f26, -48
-; FAST-NEXT: .cfi_offset f27, -40
-; FAST-NEXT: .cfi_offset f28, -32
-; FAST-NEXT: .cfi_offset f29, -24
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
; FAST-NEXT: stfd f24, -64(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f25, -56(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f26, -48(r1) # 8-byte Folded Spill
@@ -931,44 +850,12 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) {
}
declare <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half>)
-define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
+define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) nounwind {
; BE-LABEL: lrint_v16i64_v16f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -496(r1)
; BE-NEXT: std r0, 512(r1)
-; BE-NEXT: .cfi_def_cfa_offset 496
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r16, -248
-; BE-NEXT: .cfi_offset r17, -240
-; BE-NEXT: .cfi_offset r18, -232
-; BE-NEXT: .cfi_offset r19, -224
-; BE-NEXT: .cfi_offset r20, -216
-; BE-NEXT: .cfi_offset r21, -208
-; BE-NEXT: .cfi_offset r22, -200
-; BE-NEXT: .cfi_offset r23, -192
-; BE-NEXT: .cfi_offset r24, -184
-; BE-NEXT: .cfi_offset r25, -176
-; BE-NEXT: .cfi_offset r26, -168
-; BE-NEXT: .cfi_offset r27, -160
-; BE-NEXT: .cfi_offset r28, -152
-; BE-NEXT: .cfi_offset r29, -144
-; BE-NEXT: .cfi_offset r30, -136
-; BE-NEXT: .cfi_offset f17, -120
-; BE-NEXT: .cfi_offset f18, -112
-; BE-NEXT: .cfi_offset f19, -104
-; BE-NEXT: .cfi_offset f20, -96
-; BE-NEXT: .cfi_offset f21, -88
-; BE-NEXT: .cfi_offset f22, -80
-; BE-NEXT: .cfi_offset f23, -72
-; BE-NEXT: .cfi_offset f24, -64
-; BE-NEXT: .cfi_offset f25, -56
-; BE-NEXT: .cfi_offset f26, -48
-; BE-NEXT: .cfi_offset f27, -40
-; BE-NEXT: .cfi_offset f28, -32
-; BE-NEXT: .cfi_offset f29, -24
-; BE-NEXT: .cfi_offset f30, -16
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f20, 400(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f20, f1
; BE-NEXT: fmr f1, f2
@@ -1255,105 +1142,65 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -432(r1)
-; CHECK-NEXT: std r0, 448(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 432
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r16, -248
-; CHECK-NEXT: .cfi_offset r17, -240
-; CHECK-NEXT: .cfi_offset r18, -232
-; CHECK-NEXT: .cfi_offset r19, -224
-; CHECK-NEXT: .cfi_offset r20, -216
-; CHECK-NEXT: .cfi_offset r21, -208
-; CHECK-NEXT: .cfi_offset r22, -200
-; CHECK-NEXT: .cfi_offset r23, -192
-; CHECK-NEXT: .cfi_offset r24, -184
-; CHECK-NEXT: .cfi_offset r25, -176
-; CHECK-NEXT: .cfi_offset r26, -168
-; CHECK-NEXT: .cfi_offset r27, -160
-; CHECK-NEXT: .cfi_offset r28, -152
-; CHECK-NEXT: .cfi_offset r29, -144
-; CHECK-NEXT: .cfi_offset r30, -136
-; CHECK-NEXT: .cfi_offset f17, -120
-; CHECK-NEXT: .cfi_offset f18, -112
-; CHECK-NEXT: .cfi_offset f19, -104
-; CHECK-NEXT: .cfi_offset f20, -96
-; CHECK-NEXT: .cfi_offset f21, -88
-; CHECK-NEXT: .cfi_offset f22, -80
-; CHECK-NEXT: .cfi_offset f23, -72
-; CHECK-NEXT: .cfi_offset f24, -64
-; CHECK-NEXT: .cfi_offset f25, -56
-; CHECK-NEXT: .cfi_offset f26, -48
-; CHECK-NEXT: .cfi_offset f27, -40
-; CHECK-NEXT: .cfi_offset f28, -32
-; CHECK-NEXT: .cfi_offset f29, -24
-; CHECK-NEXT: .cfi_offset f30, -16
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v24, -384
-; CHECK-NEXT: .cfi_offset v25, -368
-; CHECK-NEXT: .cfi_offset v26, -352
-; CHECK-NEXT: .cfi_offset v27, -336
-; CHECK-NEXT: .cfi_offset v28, -320
-; CHECK-NEXT: .cfi_offset v29, -304
-; CHECK-NEXT: .cfi_offset v30, -288
-; CHECK-NEXT: .cfi_offset v31, -272
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 448(r1)
; CHECK-NEXT: std r16, 184(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r17, 192(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r18, 200(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r19, 208(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r20, 216(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r21, 224(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r22, 232(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v24, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: std r22, 232(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r23, 240(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r24, 248(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r25, 256(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r26, 264(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r27, 272(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r28, 280(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v25, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: std r28, 280(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r29, 288(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r30, 296(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f17, 312(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f18, 320(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f19, 328(r1) # 8-byte Folded Spill
+; CHECK-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 96
; CHECK-NEXT: stfd f20, 336(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f20, f2
-; CHECK-NEXT: stvx v26, r1, r3 # 16-byte Folded Spill
-; CHECK-NEXT: li r3, 96
; CHECK-NEXT: stfd f21, 344(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f21, f3
; CHECK-NEXT: stfd f22, 352(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f22, f4
+; CHECK-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 112
; CHECK-NEXT: stfd f23, 360(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f23, f5
-; CHECK-NEXT: stvx v27, r1, r3 # 16-byte Folded Spill
-; CHECK-NEXT: li r3, 112
; CHECK-NEXT: stfd f24, 368(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f24, f6
; CHECK-NEXT: stfd f25, 376(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f25, f7
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 128
; CHECK-NEXT: stfd f26, 384(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f26, f8
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
-; CHECK-NEXT: li r3, 128
; CHECK-NEXT: stfd f27, 392(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f27, f9
; CHECK-NEXT: stfd f28, 400(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f28, f10
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 144
; CHECK-NEXT: stfd f29, 408(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f29, f11
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
-; CHECK-NEXT: li r3, 144
; CHECK-NEXT: stfd f30, 416(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f30, f12
; CHECK-NEXT: stfd f31, 424(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f13
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 160
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f20
@@ -1556,7 +1403,7 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: vmr v4, v29
; CHECK-NEXT: lfd f30, 416(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f29, 408(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 144
; CHECK-NEXT: vmr v5, v28
; CHECK-NEXT: vmr v6, v27
@@ -1564,7 +1411,7 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: vmr v8, v25
; CHECK-NEXT: lfd f28, 400(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f27, 392(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 128
; CHECK-NEXT: lfd f26, 384(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f25, 376(r1) # 8-byte Folded Reload
@@ -1572,7 +1419,7 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: lfd f24, 368(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f23, 360(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f22, 352(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 112
; CHECK-NEXT: lfd f21, 344(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r30, 296(r1) # 8-byte Folded Reload
@@ -1580,7 +1427,7 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: lfd f19, 328(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 288(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r28, 280(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: lfd f18, 320(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r27, 272(r1) # 8-byte Folded Reload
@@ -1588,7 +1435,7 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: ld r26, 264(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r25, 256(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r24, 248(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
; CHECK-NEXT: ld r23, 240(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r22, 232(r1) # 8-byte Folded Reload
@@ -1596,13 +1443,13 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-NEXT: ld r20, 216(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r19, 208(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r18, 200(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: ld r17, 192(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r16, 184(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 432
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -1611,24 +1458,6 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; FAST-LABEL: lrint_v16i64_v16f16:
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
-; FAST-NEXT: .cfi_def_cfa_offset 160
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset f16, -128
-; FAST-NEXT: .cfi_offset f17, -120
-; FAST-NEXT: .cfi_offset f18, -112
-; FAST-NEXT: .cfi_offset f19, -104
-; FAST-NEXT: .cfi_offset f20, -96
-; FAST-NEXT: .cfi_offset f21, -88
-; FAST-NEXT: .cfi_offset f22, -80
-; FAST-NEXT: .cfi_offset f23, -72
-; FAST-NEXT: .cfi_offset f24, -64
-; FAST-NEXT: .cfi_offset f25, -56
-; FAST-NEXT: .cfi_offset f26, -48
-; FAST-NEXT: .cfi_offset f27, -40
-; FAST-NEXT: .cfi_offset f28, -32
-; FAST-NEXT: .cfi_offset f29, -24
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
; FAST-NEXT: stfd f16, -128(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f17, -120(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f18, -112(r1) # 8-byte Folded Spill
@@ -1852,50 +1681,12 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
}
declare <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half>)
-define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
+define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) nounwind {
; BE-LABEL: lrint_v32i64_v32f16:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -864(r1)
; BE-NEXT: std r0, 880(r1)
-; BE-NEXT: .cfi_def_cfa_offset 864
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset r14, -288
-; BE-NEXT: .cfi_offset r15, -280
-; BE-NEXT: .cfi_offset r16, -272
-; BE-NEXT: .cfi_offset r17, -264
-; BE-NEXT: .cfi_offset r18, -256
-; BE-NEXT: .cfi_offset r19, -248
-; BE-NEXT: .cfi_offset r20, -240
-; BE-NEXT: .cfi_offset r21, -232
-; BE-NEXT: .cfi_offset r22, -224
-; BE-NEXT: .cfi_offset r23, -216
-; BE-NEXT: .cfi_offset r24, -208
-; BE-NEXT: .cfi_offset r25, -200
-; BE-NEXT: .cfi_offset r26, -192
-; BE-NEXT: .cfi_offset r27, -184
-; BE-NEXT: .cfi_offset r28, -176
-; BE-NEXT: .cfi_offset r29, -168
-; BE-NEXT: .cfi_offset r30, -160
-; BE-NEXT: .cfi_offset r31, -152
-; BE-NEXT: .cfi_offset f14, -144
-; BE-NEXT: .cfi_offset f15, -136
-; BE-NEXT: .cfi_offset f16, -128
-; BE-NEXT: .cfi_offset f17, -120
-; BE-NEXT: .cfi_offset f18, -112
-; BE-NEXT: .cfi_offset f19, -104
-; BE-NEXT: .cfi_offset f20, -96
-; BE-NEXT: .cfi_offset f21, -88
-; BE-NEXT: .cfi_offset f22, -80
-; BE-NEXT: .cfi_offset f23, -72
-; BE-NEXT: .cfi_offset f24, -64
-; BE-NEXT: .cfi_offset f25, -56
-; BE-NEXT: .cfi_offset f26, -48
-; BE-NEXT: .cfi_offset f27, -40
-; BE-NEXT: .cfi_offset f28, -32
-; BE-NEXT: .cfi_offset f29, -24
-; BE-NEXT: .cfi_offset f30, -16
-; BE-NEXT: .cfi_offset f31, -8
; BE-NEXT: stfd f20, 768(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f20, f1
; BE-NEXT: fmr f1, f2
@@ -1935,6 +1726,7 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; BE-NEXT: stfd f30, 848(r1) # 8-byte Folded Spill
; BE-NEXT: stfd f31, 856(r1) # 8-byte Folded Spill
; BE-NEXT: fmr f31, f13
+; BE-NEXT: mr r30, r3
; BE-NEXT: fmr f29, f12
; BE-NEXT: fmr f30, f11
; BE-NEXT: fmr f28, f10
@@ -1945,7 +1737,6 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; BE-NEXT: fmr f23, f5
; BE-NEXT: fmr f22, f4
; BE-NEXT: fmr f21, f3
-; BE-NEXT: mr r30, r3
; BE-NEXT: bl __truncsfhf2
; BE-NEXT: nop
; BE-NEXT: fmr f1, f20
@@ -2448,98 +2239,48 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -688(r1)
-; CHECK-NEXT: std r0, 704(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 688
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset r14, -288
-; CHECK-NEXT: .cfi_offset r15, -280
-; CHECK-NEXT: .cfi_offset r16, -272
-; CHECK-NEXT: .cfi_offset r17, -264
-; CHECK-NEXT: .cfi_offset r18, -256
-; CHECK-NEXT: .cfi_offset r19, -248
-; CHECK-NEXT: .cfi_offset r20, -240
-; CHECK-NEXT: .cfi_offset r21, -232
-; CHECK-NEXT: .cfi_offset r22, -224
-; CHECK-NEXT: .cfi_offset r23, -216
-; CHECK-NEXT: .cfi_offset r24, -208
-; CHECK-NEXT: .cfi_offset r25, -200
-; CHECK-NEXT: .cfi_offset r26, -192
-; CHECK-NEXT: .cfi_offset r27, -184
-; CHECK-NEXT: .cfi_offset r28, -176
-; CHECK-NEXT: .cfi_offset r29, -168
-; CHECK-NEXT: .cfi_offset r30, -160
-; CHECK-NEXT: .cfi_offset r31, -152
-; CHECK-NEXT: .cfi_offset f14, -144
-; CHECK-NEXT: .cfi_offset f15, -136
-; CHECK-NEXT: .cfi_offset f16, -128
-; CHECK-NEXT: .cfi_offset f17, -120
-; CHECK-NEXT: .cfi_offset f18, -112
-; CHECK-NEXT: .cfi_offset f19, -104
-; CHECK-NEXT: .cfi_offset f20, -96
-; CHECK-NEXT: .cfi_offset f21, -88
-; CHECK-NEXT: .cfi_offset f22, -80
-; CHECK-NEXT: .cfi_offset f23, -72
-; CHECK-NEXT: .cfi_offset f24, -64
-; CHECK-NEXT: .cfi_offset f25, -56
-; CHECK-NEXT: .cfi_offset f26, -48
-; CHECK-NEXT: .cfi_offset f27, -40
-; CHECK-NEXT: .cfi_offset f28, -32
-; CHECK-NEXT: .cfi_offset f29, -24
-; CHECK-NEXT: .cfi_offset f30, -16
-; CHECK-NEXT: .cfi_offset f31, -8
-; CHECK-NEXT: .cfi_offset v20, -480
-; CHECK-NEXT: .cfi_offset v21, -464
-; CHECK-NEXT: .cfi_offset v22, -448
-; CHECK-NEXT: .cfi_offset v23, -432
-; CHECK-NEXT: .cfi_offset v24, -416
-; CHECK-NEXT: .cfi_offset v25, -400
-; CHECK-NEXT: .cfi_offset v26, -384
-; CHECK-NEXT: .cfi_offset v27, -368
-; CHECK-NEXT: .cfi_offset v28, -352
-; CHECK-NEXT: .cfi_offset v29, -336
-; CHECK-NEXT: .cfi_offset v30, -320
-; CHECK-NEXT: .cfi_offset v31, -304
; CHECK-NEXT: li r4, 208
+; CHECK-NEXT: std r0, 704(r1)
; CHECK-NEXT: std r14, 400(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r15, 408(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r16, 416(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r17, 424(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r18, 432(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r19, 440(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r20, 448(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v20, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v20, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 224
+; CHECK-NEXT: std r20, 448(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r21, 456(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r22, 464(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r23, 472(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r24, 480(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r25, 488(r1) # 8-byte Folded Spill
-; CHECK-NEXT: std r26, 496(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v21, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v21, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 240
+; CHECK-NEXT: std r26, 496(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r27, 504(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r28, 512(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r29, 520(r1) # 8-byte Folded Spill
; CHECK-NEXT: std r30, 528(r1) # 8-byte Folded Spill
; CHECK-NEXT: mr r30, r3
-; CHECK-NEXT: std r31, 536(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v22, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v22, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 256
+; CHECK-NEXT: std r31, 536(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f14, 544(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f15, 552(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f16, 560(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f17, 568(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f18, 576(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stfd f19, 584(r1) # 8-byte Folded Spill
-; CHECK-NEXT: stvx v23, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v23, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 272
+; CHECK-NEXT: stfd f19, 584(r1) # 8-byte Folded Spill
; CHECK-NEXT: stfd f20, 592(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f20, f2
; CHECK-NEXT: stfd f21, 600(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f21, f3
; CHECK-NEXT: stfd f22, 608(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f22, f4
-; CHECK-NEXT: stvx v24, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v24, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 288
; CHECK-NEXT: stfd f23, 616(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f23, f5
@@ -2547,7 +2288,7 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: fmr f24, f6
; CHECK-NEXT: stfd f25, 632(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f25, f7
-; CHECK-NEXT: stvx v25, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v25, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 304
; CHECK-NEXT: stfd f26, 640(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f26, f8
@@ -2555,7 +2296,7 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: fmr f27, f9
; CHECK-NEXT: stfd f28, 656(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f28, f10
-; CHECK-NEXT: stvx v26, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v26, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 320
; CHECK-NEXT: stfd f29, 664(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f29, f11
@@ -2563,15 +2304,15 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: fmr f30, f12
; CHECK-NEXT: stfd f31, 680(r1) # 8-byte Folded Spill
; CHECK-NEXT: fmr f31, f13
-; CHECK-NEXT: stvx v27, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v27, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 336
-; CHECK-NEXT: stvx v28, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v28, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 352
-; CHECK-NEXT: stvx v29, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 368
-; CHECK-NEXT: stvx v30, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: li r4, 384
-; CHECK-NEXT: stvx v31, r1, r4 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r4 # 16-byte Folded Spill
; CHECK-NEXT: bl __truncsfhf2
; CHECK-NEXT: nop
; CHECK-NEXT: fmr f1, f20
@@ -3050,7 +2791,7 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: li r3, 384
; CHECK-NEXT: xxswapd vs4, vs4
; CHECK-NEXT: stxvd2x vs4, 0, r30
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 368
; CHECK-NEXT: lfd f31, 680(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f30, 672(r1) # 8-byte Folded Reload
@@ -3068,7 +2809,7 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: lfd f18, 576(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f17, 568(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f16, 560(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 352
; CHECK-NEXT: lfd f15, 552(r1) # 8-byte Folded Reload
; CHECK-NEXT: lfd f14, 544(r1) # 8-byte Folded Reload
@@ -3076,7 +2817,7 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: ld r30, 528(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 520(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r28, 512(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 336
; CHECK-NEXT: ld r27, 504(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r26, 496(r1) # 8-byte Folded Reload
@@ -3084,7 +2825,7 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: ld r24, 480(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r23, 472(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r22, 464(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 320
; CHECK-NEXT: ld r21, 456(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r20, 448(r1) # 8-byte Folded Reload
@@ -3092,23 +2833,23 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-NEXT: ld r18, 432(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r17, 424(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r16, 416(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 304
; CHECK-NEXT: ld r15, 408(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r14, 400(r1) # 8-byte Folded Reload
-; CHECK-NEXT: lvx v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 288
-; CHECK-NEXT: lvx v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 272
-; CHECK-NEXT: lvx v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 256
-; CHECK-NEXT: lvx v23, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 240
-; CHECK-NEXT: lvx v22, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 224
-; CHECK-NEXT: lvx v21, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 208
-; CHECK-NEXT: lvx v20, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 688
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -3118,95 +2859,62 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; FAST: # %bb.0:
; FAST-NEXT: mflr r0
; FAST-NEXT: stdu r1, -480(r1)
-; FAST-NEXT: std r0, 496(r1)
-; FAST-NEXT: .cfi_def_cfa_offset 480
-; FAST-NEXT: .cfi_offset lr, 16
-; FAST-NEXT: .cfi_offset r30, -160
-; FAST-NEXT: .cfi_offset f14, -144
-; FAST-NEXT: .cfi_offset f15, -136
-; FAST-NEXT: .cfi_offset f16, -128
-; FAST-NEXT: .cfi_offset f17, -120
-; FAST-NEXT: .cfi_offset f18, -112
-; FAST-NEXT: .cfi_offset f19, -104
-; FAST-NEXT: .cfi_offset f20, -96
-; FAST-NEXT: .cfi_offset f21, -88
-; FAST-NEXT: .cfi_offset f22, -80
-; FAST-NEXT: .cfi_offset f23, -72
-; FAST-NEXT: .cfi_offset f24, -64
-; FAST-NEXT: .cfi_offset f25, -56
-; FAST-NEXT: .cfi_offset f26, -48
-; FAST-NEXT: .cfi_offset f27, -40
-; FAST-NEXT: .cfi_offset f28, -32
-; FAST-NEXT: .cfi_offset f29, -24
-; FAST-NEXT: .cfi_offset f30, -16
-; FAST-NEXT: .cfi_offset f31, -8
-; FAST-NEXT: .cfi_offset v20, -352
-; FAST-NEXT: .cfi_offset v21, -336
-; FAST-NEXT: .cfi_offset v22, -320
-; FAST-NEXT: .cfi_offset v23, -304
-; FAST-NEXT: .cfi_offset v24, -288
-; FAST-NEXT: .cfi_offset v25, -272
-; FAST-NEXT: .cfi_offset v26, -256
-; FAST-NEXT: .cfi_offset v27, -240
-; FAST-NEXT: .cfi_offset v28, -224
-; FAST-NEXT: .cfi_offset v29, -208
-; FAST-NEXT: .cfi_offset v30, -192
-; FAST-NEXT: .cfi_offset v31, -176
; FAST-NEXT: li r4, 128
+; FAST-NEXT: std r0, 496(r1)
; FAST-NEXT: std r30, 320(r1) # 8-byte Folded Spill
+; FAST-NEXT: mr r30, r3
; FAST-NEXT: stfd f14, 336(r1) # 8-byte Folded Spill
-; FAST-NEXT: fmr f14, f5
; FAST-NEXT: stfd f15, 344(r1) # 8-byte Folded Spill
+; FAST-NEXT: fmr f14, f5
; FAST-NEXT: stfd f16, 352(r1) # 8-byte Folded Spill
-; FAST-NEXT: fmr f16, f4
-; FAST-NEXT: mr r30, r3
-; FAST-NEXT: stvx v20, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v20, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 144
+; FAST-NEXT: fmr f16, f4
; FAST-NEXT: stfd f17, 360(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f18, 368(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f19, 376(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f20, 384(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f21, 392(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f22, 400(r1) # 8-byte Folded Spill
-; FAST-NEXT: stvx v21, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v21, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 160
+; FAST-NEXT: stfd f22, 400(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f23, 408(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f24, 416(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f25, 424(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f26, 432(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f27, 440(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f28, 448(r1) # 8-byte Folded Spill
-; FAST-NEXT: stvx v22, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v22, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 176
; FAST-NEXT: xxlor v22, f3, f3
+; FAST-NEXT: stfd f28, 448(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f29, 456(r1) # 8-byte Folded Spill
; FAST-NEXT: fmr f29, f9
; FAST-NEXT: stfd f30, 464(r1) # 8-byte Folded Spill
; FAST-NEXT: stfd f31, 472(r1) # 8-byte Folded Spill
-; FAST-NEXT: stvx v23, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v23, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 192
; FAST-NEXT: xxlor v23, f2, f2
-; FAST-NEXT: stvx v24, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v24, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 208
-; FAST-NEXT: stvx v25, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v25, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 224
; FAST-NEXT: xxlor v25, f13, f13
-; FAST-NEXT: stvx v26, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v26, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 240
; FAST-NEXT: xxlor v26, f12, f12
-; FAST-NEXT: stvx v27, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v27, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 256
; FAST-NEXT: xxlor v27, f11, f11
-; FAST-NEXT: stvx v28, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v28, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 272
; FAST-NEXT: xxlor v28, f10, f10
-; FAST-NEXT: stvx v29, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v29, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 288
; FAST-NEXT: xxlor v29, f8, f8
-; FAST-NEXT: stvx v30, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v30, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 304
; FAST-NEXT: xxlor v30, f7, f7
-; FAST-NEXT: stvx v31, r1, r4 # 16-byte Folded Spill
+; FAST-NEXT: stxvd2x v31, r1, r4 # 16-byte Folded Spill
; FAST-NEXT: li r4, 44
; FAST-NEXT: xxlor v31, f6, f6
; FAST-NEXT: stxsspx f1, r1, r4 # 4-byte Folded Spill
@@ -3635,30 +3343,30 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; FAST-NEXT: lfd f16, 352(r1) # 8-byte Folded Reload
; FAST-NEXT: lfd f15, 344(r1) # 8-byte Folded Reload
; FAST-NEXT: lfd f14, 336(r1) # 8-byte Folded Reload
-; FAST-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 288
; FAST-NEXT: ld r30, 320(r1) # 8-byte Folded Reload
-; FAST-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 272
-; FAST-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 256
-; FAST-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 240
-; FAST-NEXT: lvx v27, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 224
-; FAST-NEXT: lvx v26, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 208
-; FAST-NEXT: lvx v25, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 192
-; FAST-NEXT: lvx v24, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 176
-; FAST-NEXT: lvx v23, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 160
-; FAST-NEXT: lvx v22, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 144
-; FAST-NEXT: lvx v21, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: li r3, 128
-; FAST-NEXT: lvx v20, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
; FAST-NEXT: addi r1, r1, 480
; FAST-NEXT: ld r0, 16(r1)
; FAST-NEXT: mtlr r0
@@ -3668,14 +3376,12 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
}
declare <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half>)
-define <1 x i64> @lrint_v1f32(<1 x float> %x) {
+define <1 x i64> @lrint_v1f32(<1 x float> %x) nounwind {
; BE-LABEL: lrint_v1f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -112(r1)
; BE-NEXT: std r0, 128(r1)
-; BE-NEXT: .cfi_def_cfa_offset 112
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: bl lrintf
; BE-NEXT: nop
; BE-NEXT: addi r1, r1, 112
@@ -3688,8 +3394,6 @@ define <1 x i64> @lrint_v1f32(<1 x float> %x) {
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -32(r1)
; CHECK-NEXT: std r0, 48(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: bl lrintf
; CHECK-NEXT: nop
; CHECK-NEXT: addi r1, r1, 32
@@ -3707,15 +3411,13 @@ define <1 x i64> @lrint_v1f32(<1 x float> %x) {
}
declare <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float>)
-define <2 x i64> @lrint_v2f32(<2 x float> %x) {
+define <2 x i64> @lrint_v2f32(<2 x float> %x) nounwind {
; BE-LABEL: lrint_v2f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -144(r1)
-; BE-NEXT: std r0, 160(r1)
-; BE-NEXT: .cfi_def_cfa_offset 144
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: addi r3, r1, 112
+; BE-NEXT: std r0, 160(r1)
; BE-NEXT: stxvw4x v2, 0, r3
; BE-NEXT: lfs f1, 116(r1)
; BE-NEXT: bl lrintf
@@ -3736,14 +3438,11 @@ define <2 x i64> @lrint_v2f32(<2 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: std r0, 80(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: xxsldwi vs0, v2, v2, 3
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 80(r1)
; CHECK-NEXT: xscvspdpn f1, vs0
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v2
; CHECK-NEXT: bl lrintf
; CHECK-NEXT: nop
@@ -3755,7 +3454,7 @@ define <2 x i64> @lrint_v2f32(<2 x float> %x) {
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: li r3, 48
; CHECK-NEXT: xxmrghd v2, vs0, v31
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 64
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -3780,15 +3479,13 @@ define <2 x i64> @lrint_v2f32(<2 x float> %x) {
}
declare <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float>)
-define <4 x i64> @lrint_v4f32(<4 x float> %x) {
+define <4 x i64> @lrint_v4f32(<4 x float> %x) nounwind {
; BE-LABEL: lrint_v4f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -160(r1)
-; BE-NEXT: std r0, 176(r1)
-; BE-NEXT: .cfi_def_cfa_offset 160
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: addi r3, r1, 112
+; BE-NEXT: std r0, 176(r1)
; BE-NEXT: stxvw4x v2, 0, r3
; BE-NEXT: lfs f1, 116(r1)
; BE-NEXT: bl lrintf
@@ -3819,17 +3516,13 @@ define <4 x i64> @lrint_v4f32(<4 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -80(r1)
-; CHECK-NEXT: std r0, 96(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: xxsldwi vs0, v2, v2, 3
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 96(r1)
; CHECK-NEXT: xscvspdpn f1, vs0
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v2
; CHECK-NEXT: bl lrintf
; CHECK-NEXT: nop
@@ -3852,9 +3545,9 @@ define <4 x i64> @lrint_v4f32(<4 x float> %x) {
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: vmr v2, v30
; CHECK-NEXT: xxmrghd v3, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 80
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -3890,15 +3583,13 @@ define <4 x i64> @lrint_v4f32(<4 x float> %x) {
}
declare <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float>)
-define <8 x i64> @lrint_v8f32(<8 x float> %x) {
+define <8 x i64> @lrint_v8f32(<8 x float> %x) nounwind {
; BE-LABEL: lrint_v8f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -208(r1)
-; BE-NEXT: std r0, 224(r1)
-; BE-NEXT: .cfi_def_cfa_offset 208
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: addi r3, r1, 112
+; BE-NEXT: std r0, 224(r1)
; BE-NEXT: stxvw4x v2, 0, r3
; BE-NEXT: addi r3, r1, 128
; BE-NEXT: stxvw4x v3, 0, r3
@@ -3951,24 +3642,18 @@ define <8 x i64> @lrint_v8f32(<8 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -112(r1)
-; CHECK-NEXT: std r0, 128(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 112
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v28, -64
-; CHECK-NEXT: .cfi_offset v29, -48
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
; CHECK-NEXT: xxsldwi vs0, v2, v2, 3
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: std r0, 128(r1)
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: xscvspdpn f1, vs0
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: vmr v30, v2
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v3
; CHECK-NEXT: bl lrintf
; CHECK-NEXT: nop
@@ -4014,13 +3699,13 @@ define <8 x i64> @lrint_v8f32(<8 x float> %x) {
; CHECK-NEXT: vmr v2, v29
; CHECK-NEXT: vmr v4, v28
; CHECK-NEXT: xxmrghd v5, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 112
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4078,15 +3763,13 @@ define <8 x i64> @lrint_v8f32(<8 x float> %x) {
}
declare <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float>)
-define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
+define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) nounwind {
; BE-LABEL: lrint_v16i64_v16f32:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -304(r1)
-; BE-NEXT: std r0, 320(r1)
-; BE-NEXT: .cfi_def_cfa_offset 304
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: addi r3, r1, 112
+; BE-NEXT: std r0, 320(r1)
; BE-NEXT: stxvw4x v2, 0, r3
; BE-NEXT: addi r3, r1, 128
; BE-NEXT: stxvw4x v3, 0, r3
@@ -4183,38 +3866,28 @@ define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -176(r1)
-; CHECK-NEXT: std r0, 192(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 176
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v24, -128
-; CHECK-NEXT: .cfi_offset v25, -112
-; CHECK-NEXT: .cfi_offset v26, -96
-; CHECK-NEXT: .cfi_offset v27, -80
-; CHECK-NEXT: .cfi_offset v28, -64
-; CHECK-NEXT: .cfi_offset v29, -48
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 192(r1)
; CHECK-NEXT: xxsldwi vs0, v2, v2, 3
-; CHECK-NEXT: stvx v24, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: xscvspdpn f1, vs0
-; CHECK-NEXT: stvx v25, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: stvx v26, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: vmr v26, v3
-; CHECK-NEXT: stvx v27, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 112
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 128
; CHECK-NEXT: vmr v28, v4
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 144
; CHECK-NEXT: vmr v29, v2
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 160
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v5
; CHECK-NEXT: bl lrintf
; CHECK-NEXT: nop
@@ -4306,21 +3979,21 @@ define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
; CHECK-NEXT: vmr v6, v25
; CHECK-NEXT: vmr v8, v24
; CHECK-NEXT: xxmrghd v9, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 144
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 128
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 112
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 96
-; CHECK-NEXT: lvx v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: lvx v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
-; CHECK-NEXT: lvx v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 176
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4422,14 +4095,12 @@ define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
}
declare <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float>)
-define <1 x i64> @lrint_v1f64(<1 x double> %x) {
+define <1 x i64> @lrint_v1f64(<1 x double> %x) nounwind {
; BE-LABEL: lrint_v1f64:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -112(r1)
; BE-NEXT: std r0, 128(r1)
-; BE-NEXT: .cfi_def_cfa_offset 112
-; BE-NEXT: .cfi_offset lr, 16
; BE-NEXT: bl lrint
; BE-NEXT: nop
; BE-NEXT: addi r1, r1, 112
@@ -4442,8 +4113,6 @@ define <1 x i64> @lrint_v1f64(<1 x double> %x) {
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -32(r1)
; CHECK-NEXT: std r0, 48(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset lr, 16
; CHECK-NEXT: bl lrint
; CHECK-NEXT: nop
; CHECK-NEXT: addi r1, r1, 32
@@ -4461,16 +4130,13 @@ define <1 x i64> @lrint_v1f64(<1 x double> %x) {
}
declare <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double>)
-define <2 x i64> @lrint_v2f64(<2 x double> %x) {
+define <2 x i64> @lrint_v2f64(<2 x double> %x) nounwind {
; BE-LABEL: lrint_v2f64:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -160(r1)
-; BE-NEXT: std r0, 176(r1)
-; BE-NEXT: .cfi_def_cfa_offset 160
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset v31, -16
; BE-NEXT: li r3, 144
+; BE-NEXT: std r0, 176(r1)
; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; BE-NEXT: vmr v31, v2
; BE-NEXT: xxlor f1, v31, v31
@@ -4494,12 +4160,9 @@ define <2 x i64> @lrint_v2f64(<2 x double> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: std r0, 80(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: std r0, 80(r1)
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v2
; CHECK-NEXT: xxlor f1, v31, v31
; CHECK-NEXT: bl lrint
@@ -4511,7 +4174,7 @@ define <2 x i64> @lrint_v2f64(<2 x double> %x) {
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: li r3, 48
; CHECK-NEXT: xxmrghd v2, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 64
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4534,17 +4197,13 @@ define <2 x i64> @lrint_v2f64(<2 x double> %x) {
}
declare <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double>)
-define <4 x i64> @lrint_v4f64(<4 x double> %x) {
+define <4 x i64> @lrint_v4f64(<4 x double> %x) nounwind {
; BE-LABEL: lrint_v4f64:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -192(r1)
-; BE-NEXT: std r0, 208(r1)
-; BE-NEXT: .cfi_def_cfa_offset 192
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset v30, -32
-; BE-NEXT: .cfi_offset v31, -16
; BE-NEXT: li r3, 160
+; BE-NEXT: std r0, 208(r1)
; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; BE-NEXT: vmr v30, v2
; BE-NEXT: li r3, 176
@@ -4583,17 +4242,13 @@ define <4 x i64> @lrint_v4f64(<4 x double> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -80(r1)
-; CHECK-NEXT: std r0, 96(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: std r0, 96(r1)
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v30, v2
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: xxlor f1, v30, v30
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v3
; CHECK-NEXT: bl lrint
; CHECK-NEXT: nop
@@ -4614,9 +4269,9 @@ define <4 x i64> @lrint_v4f64(<4 x double> %x) {
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: vmr v2, v30
; CHECK-NEXT: xxmrghd v3, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 80
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4648,25 +4303,19 @@ define <4 x i64> @lrint_v4f64(<4 x double> %x) {
}
declare <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>)
-define <8 x i64> @lrint_v8f64(<8 x double> %x) {
+define <8 x i64> @lrint_v8f64(<8 x double> %x) nounwind {
; BE-LABEL: lrint_v8f64:
; BE: # %bb.0:
; BE-NEXT: mflr r0
; BE-NEXT: stdu r1, -256(r1)
-; BE-NEXT: std r0, 272(r1)
-; BE-NEXT: .cfi_def_cfa_offset 256
-; BE-NEXT: .cfi_offset lr, 16
-; BE-NEXT: .cfi_offset v28, -64
-; BE-NEXT: .cfi_offset v29, -48
-; BE-NEXT: .cfi_offset v30, -32
-; BE-NEXT: .cfi_offset v31, -16
; BE-NEXT: li r3, 192
+; BE-NEXT: std r0, 272(r1)
; BE-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; BE-NEXT: li r3, 208
; BE-NEXT: vmr v28, v2
-; BE-NEXT: xxlor f1, v28, v28
; BE-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; BE-NEXT: li r3, 224
+; BE-NEXT: xxlor f1, v28, v28
; BE-NEXT: vmr v29, v3
; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; BE-NEXT: li r3, 240
@@ -4729,25 +4378,19 @@ define <8 x i64> @lrint_v8f64(<8 x double> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: mflr r0
; CHECK-NEXT: stdu r1, -112(r1)
-; CHECK-NEXT: std r0, 128(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 112
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: .cfi_offset v28, -64
-; CHECK-NEXT: .cfi_offset v29, -48
-; CHECK-NEXT: .cfi_offset v30, -32
-; CHECK-NEXT: .cfi_offset v31, -16
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: stvx v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: std r0, 128(r1)
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 64
; CHECK-NEXT: vmr v28, v2
-; CHECK-NEXT: stvx v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 80
; CHECK-NEXT: xxlor f1, v28, v28
; CHECK-NEXT: vmr v29, v3
-; CHECK-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: li r3, 96
; CHECK-NEXT: vmr v30, v4
-; CHECK-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
; CHECK-NEXT: vmr v31, v5
; CHECK-NEXT: bl lrint
; CHECK-NEXT: nop
@@ -4788,13 +4431,13 @@ define <8 x i64> @lrint_v8f64(<8 x double> %x) {
; CHECK-NEXT: vmr v3, v29
; CHECK-NEXT: vmr v2, v28
; CHECK-NEXT: xxmrghd v5, v31, vs0
-; CHECK-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 80
-; CHECK-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 64
-; CHECK-NEXT: lvx v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: li r3, 48
-; CHECK-NEXT: lvx v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
; CHECK-NEXT: addi r1, r1, 112
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
@@ -4843,3 +4486,1046 @@ define <8 x i64> @lrint_v8f64(<8 x double> %x) {
ret <8 x i64> %a
}
declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>)
+
+define <1 x i64> @lrint_v1f128(<1 x fp128> %x) nounwind {
+; BE-LABEL: lrint_v1f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: lrint_v1f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -32(r1)
+; CHECK-NEXT: std r0, 48(r1)
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: addi r1, r1, 32
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: lrint_v1f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -32(r1)
+; FAST-NEXT: std r0, 48(r1)
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: addi r1, r1, 32
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <1 x i64> @llvm.lrint.v1i64.v1f128(<1 x fp128> %x)
+ ret <1 x i64> %a
+}
+declare <1 x i64> @llvm.lrint.v1i64.v1f128(<1 x fp128>)
+
+define <2 x i64> @lrint_v2f128(<2 x fp128> %x) nounwind {
+; BE-LABEL: lrint_v2f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -160(r1)
+; BE-NEXT: li r3, 144
+; BE-NEXT: std r0, 176(r1)
+; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: vmr v31, v2
+; BE-NEXT: vmr v2, v3
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v31
+; BE-NEXT: std r3, 136(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 128(r1)
+; BE-NEXT: addi r3, r1, 128
+; BE-NEXT: lxvd2x v2, 0, r3
+; BE-NEXT: li r3, 144
+; BE-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: addi r1, r1, 160
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: lrint_v2f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -80(r1)
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 96(r1)
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: vmr v31, v3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v31
+; CHECK-NEXT: mtvsrd v30, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: xxmrghd v2, vs0, v30
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 80
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: lrint_v2f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -80(r1)
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: std r0, 96(r1)
+; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: vmr v31, v3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v31
+; FAST-NEXT: mtvsrd v30, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: xxmrghd v2, vs0, v30
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: addi r1, r1, 80
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <2 x i64> @llvm.lrint.v2i64.v2f128(<2 x fp128> %x)
+ ret <2 x i64> %a
+}
+declare <2 x i64> @llvm.lrint.v2i64.v2f128(<2 x fp128>)
+
+define <4 x i64> @lrint_v4f128(<4 x fp128> %x) nounwind {
+; BE-LABEL: lrint_v4f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -208(r1)
+; BE-NEXT: li r3, 160
+; BE-NEXT: std r0, 224(r1)
+; BE-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 176
+; BE-NEXT: vmr v29, v2
+; BE-NEXT: vmr v2, v3
+; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 192
+; BE-NEXT: vmr v30, v4
+; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: vmr v31, v5
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v29
+; BE-NEXT: std r3, 136(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v31
+; BE-NEXT: std r3, 128(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v30
+; BE-NEXT: std r3, 152(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 144(r1)
+; BE-NEXT: addi r3, r1, 128
+; BE-NEXT: lxvd2x v2, 0, r3
+; BE-NEXT: addi r3, r1, 144
+; BE-NEXT: lxvd2x v3, 0, r3
+; BE-NEXT: li r3, 192
+; BE-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 176
+; BE-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 160
+; BE-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: addi r1, r1, 208
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: lrint_v4f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -112(r1)
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 128(r1)
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: vmr v29, v3
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: vmr v30, v4
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: vmr v31, v5
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v29
+; CHECK-NEXT: mtvsrd v28, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v30
+; CHECK-NEXT: xxmrghd v29, vs0, v28
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v31
+; CHECK-NEXT: mtvsrd v30, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: vmr v2, v29
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: xxmrghd v3, vs0, v30
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 112
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: lrint_v4f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -112(r1)
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: std r0, 128(r1)
+; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: vmr v29, v3
+; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: vmr v30, v4
+; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: vmr v31, v5
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v29
+; FAST-NEXT: mtvsrd v28, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v30
+; FAST-NEXT: xxmrghd v29, vs0, v28
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v31
+; FAST-NEXT: mtvsrd v30, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: vmr v2, v29
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: xxmrghd v3, vs0, v30
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: addi r1, r1, 112
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <4 x i64> @llvm.lrint.v4i64.v4f128(<4 x fp128> %x)
+ ret <4 x i64> %a
+}
+declare <4 x i64> @llvm.lrint.v4i64.v4f128(<4 x fp128>)
+
+define <8 x i64> @lrint_v8f128(<8 x fp128> %x) nounwind {
+; BE-LABEL: lrint_v8f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -304(r1)
+; BE-NEXT: li r3, 192
+; BE-NEXT: std r0, 320(r1)
+; BE-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 208
+; BE-NEXT: vmr v25, v2
+; BE-NEXT: vmr v2, v3
+; BE-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 224
+; BE-NEXT: vmr v26, v4
+; BE-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 240
+; BE-NEXT: vmr v27, v5
+; BE-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 256
+; BE-NEXT: vmr v28, v6
+; BE-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 272
+; BE-NEXT: vmr v29, v7
+; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 288
+; BE-NEXT: vmr v30, v8
+; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: vmr v31, v9
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v25
+; BE-NEXT: std r3, 136(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v27
+; BE-NEXT: std r3, 128(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v26
+; BE-NEXT: std r3, 152(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v29
+; BE-NEXT: std r3, 144(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v28
+; BE-NEXT: std r3, 168(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v31
+; BE-NEXT: std r3, 160(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v30
+; BE-NEXT: std r3, 184(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 176(r1)
+; BE-NEXT: addi r3, r1, 128
+; BE-NEXT: lxvd2x v2, 0, r3
+; BE-NEXT: addi r3, r1, 144
+; BE-NEXT: lxvd2x v3, 0, r3
+; BE-NEXT: addi r3, r1, 160
+; BE-NEXT: lxvd2x v4, 0, r3
+; BE-NEXT: addi r3, r1, 176
+; BE-NEXT: lxvd2x v5, 0, r3
+; BE-NEXT: li r3, 288
+; BE-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 272
+; BE-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 256
+; BE-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 240
+; BE-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 224
+; BE-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 208
+; BE-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 192
+; BE-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: addi r1, r1, 304
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: lrint_v8f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -176(r1)
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: std r0, 192(r1)
+; CHECK-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: vmr v25, v3
+; CHECK-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: vmr v26, v4
+; CHECK-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 112
+; CHECK-NEXT: vmr v27, v5
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 128
+; CHECK-NEXT: vmr v28, v6
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 144
+; CHECK-NEXT: vmr v29, v7
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 160
+; CHECK-NEXT: vmr v30, v8
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: vmr v31, v9
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v25
+; CHECK-NEXT: mtvsrd v24, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v26
+; CHECK-NEXT: xxmrghd v25, vs0, v24
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v27
+; CHECK-NEXT: mtvsrd v26, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v28
+; CHECK-NEXT: xxmrghd v27, vs0, v26
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v29
+; CHECK-NEXT: mtvsrd v28, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v30
+; CHECK-NEXT: xxmrghd v29, vs0, v28
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v31
+; CHECK-NEXT: mtvsrd v30, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: li r3, 160
+; CHECK-NEXT: vmr v4, v29
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 144
+; CHECK-NEXT: vmr v3, v27
+; CHECK-NEXT: vmr v2, v25
+; CHECK-NEXT: xxmrghd v5, vs0, v30
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 128
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 112
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 176
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: lrint_v8f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -176(r1)
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: std r0, 192(r1)
+; FAST-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: vmr v25, v3
+; FAST-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: vmr v26, v4
+; FAST-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 112
+; FAST-NEXT: vmr v27, v5
+; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 128
+; FAST-NEXT: vmr v28, v6
+; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 144
+; FAST-NEXT: vmr v29, v7
+; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 160
+; FAST-NEXT: vmr v30, v8
+; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: vmr v31, v9
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v25
+; FAST-NEXT: mtvsrd v24, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v26
+; FAST-NEXT: xxmrghd v25, vs0, v24
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v27
+; FAST-NEXT: mtvsrd v26, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v28
+; FAST-NEXT: xxmrghd v27, vs0, v26
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v29
+; FAST-NEXT: mtvsrd v28, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v30
+; FAST-NEXT: xxmrghd v29, vs0, v28
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v31
+; FAST-NEXT: mtvsrd v30, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: li r3, 160
+; FAST-NEXT: vmr v4, v29
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 144
+; FAST-NEXT: vmr v3, v27
+; FAST-NEXT: vmr v2, v25
+; FAST-NEXT: xxmrghd v5, vs0, v30
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 128
+; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 112
+; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: addi r1, r1, 176
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <8 x i64> @llvm.lrint.v8i64.v8f128(<8 x fp128> %x)
+ ret <8 x i64> %a
+}
+declare <8 x i64> @llvm.lrint.v8i64.v8f128(<8 x fp128>)
+
+define <16 x i64> @lrint_v16i64_v16f128(<16 x fp128> %x) nounwind {
+; BE-LABEL: lrint_v16i64_v16f128:
+; BE: # %bb.0:
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -496(r1)
+; BE-NEXT: li r3, 304
+; BE-NEXT: std r0, 512(r1)
+; BE-NEXT: stxvd2x v20, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 320
+; BE-NEXT: stxvd2x v21, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 336
+; BE-NEXT: vmr v21, v2
+; BE-NEXT: vmr v2, v3
+; BE-NEXT: stxvd2x v22, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 352
+; BE-NEXT: vmr v22, v4
+; BE-NEXT: stxvd2x v23, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 368
+; BE-NEXT: vmr v23, v5
+; BE-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 384
+; BE-NEXT: vmr v24, v6
+; BE-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 400
+; BE-NEXT: vmr v25, v7
+; BE-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 416
+; BE-NEXT: vmr v26, v8
+; BE-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 432
+; BE-NEXT: vmr v27, v9
+; BE-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 448
+; BE-NEXT: vmr v28, v11
+; BE-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 464
+; BE-NEXT: vmr v29, v10
+; BE-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 480
+; BE-NEXT: vmr v30, v13
+; BE-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: li r3, 128
+; BE-NEXT: stxvd2x v12, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: addi r3, r1, 768
+; BE-NEXT: lxvw4x vs0, 0, r3
+; BE-NEXT: li r3, 160
+; BE-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: addi r3, r1, 784
+; BE-NEXT: lxvw4x vs0, 0, r3
+; BE-NEXT: li r3, 144
+; BE-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; BE-NEXT: addi r3, r1, 736
+; BE-NEXT: lxvw4x v20, 0, r3
+; BE-NEXT: addi r3, r1, 752
+; BE-NEXT: lxvw4x v31, 0, r3
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v21
+; BE-NEXT: std r3, 184(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v23
+; BE-NEXT: std r3, 176(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v22
+; BE-NEXT: std r3, 200(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v25
+; BE-NEXT: std r3, 192(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v24
+; BE-NEXT: std r3, 216(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v27
+; BE-NEXT: std r3, 208(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v26
+; BE-NEXT: std r3, 232(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v28
+; BE-NEXT: std r3, 224(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v29
+; BE-NEXT: std r3, 248(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v30
+; BE-NEXT: std r3, 240(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 264(r1)
+; BE-NEXT: li r3, 128
+; BE-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v31
+; BE-NEXT: std r3, 256(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: vmr v2, v20
+; BE-NEXT: std r3, 280(r1)
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 272(r1)
+; BE-NEXT: li r3, 144
+; BE-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 296(r1)
+; BE-NEXT: li r3, 160
+; BE-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: bl lrintf128
+; BE-NEXT: nop
+; BE-NEXT: std r3, 288(r1)
+; BE-NEXT: addi r3, r1, 176
+; BE-NEXT: lxvd2x v2, 0, r3
+; BE-NEXT: addi r3, r1, 192
+; BE-NEXT: lxvd2x v3, 0, r3
+; BE-NEXT: addi r3, r1, 208
+; BE-NEXT: lxvd2x v4, 0, r3
+; BE-NEXT: addi r3, r1, 224
+; BE-NEXT: lxvd2x v5, 0, r3
+; BE-NEXT: addi r3, r1, 240
+; BE-NEXT: lxvd2x v6, 0, r3
+; BE-NEXT: addi r3, r1, 256
+; BE-NEXT: lxvd2x v7, 0, r3
+; BE-NEXT: addi r3, r1, 272
+; BE-NEXT: lxvd2x v8, 0, r3
+; BE-NEXT: addi r3, r1, 288
+; BE-NEXT: lxvd2x v9, 0, r3
+; BE-NEXT: li r3, 480
+; BE-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 464
+; BE-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 448
+; BE-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 432
+; BE-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 416
+; BE-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 400
+; BE-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 384
+; BE-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 368
+; BE-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 352
+; BE-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 336
+; BE-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 320
+; BE-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: li r3, 304
+; BE-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
+; BE-NEXT: addi r1, r1, 496
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: lrint_v16i64_v16f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -304(r1)
+; CHECK-NEXT: li r3, 112
+; CHECK-NEXT: std r0, 320(r1)
+; CHECK-NEXT: stxvd2x v20, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 128
+; CHECK-NEXT: stxvd2x v21, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 144
+; CHECK-NEXT: vmr v21, v4
+; CHECK-NEXT: stxvd2x v22, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 160
+; CHECK-NEXT: vmr v22, v6
+; CHECK-NEXT: stxvd2x v23, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 176
+; CHECK-NEXT: vmr v23, v8
+; CHECK-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 192
+; CHECK-NEXT: vmr v24, v9
+; CHECK-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 208
+; CHECK-NEXT: vmr v25, v7
+; CHECK-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 224
+; CHECK-NEXT: vmr v26, v10
+; CHECK-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 240
+; CHECK-NEXT: vmr v27, v5
+; CHECK-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 256
+; CHECK-NEXT: vmr v28, v11
+; CHECK-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 272
+; CHECK-NEXT: vmr v29, v12
+; CHECK-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 288
+; CHECK-NEXT: vmr v30, v3
+; CHECK-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: stxvd2x v13, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: addi r3, r1, 576
+; CHECK-NEXT: lxvd2x vs0, 0, r3
+; CHECK-NEXT: addi r3, r1, 560
+; CHECK-NEXT: lxvd2x vs1, 0, r3
+; CHECK-NEXT: addi r3, r1, 544
+; CHECK-NEXT: lxvd2x vs2, 0, r3
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: xxswapd vs0, vs0
+; CHECK-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: xxswapd vs0, vs1
+; CHECK-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: xxswapd vs0, vs2
+; CHECK-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; CHECK-NEXT: addi r3, r1, 528
+; CHECK-NEXT: lxvd2x vs0, 0, r3
+; CHECK-NEXT: xxswapd v31, vs0
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v30
+; CHECK-NEXT: mtvsrd v20, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v21
+; CHECK-NEXT: xxmrghd v30, vs0, v20
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v27
+; CHECK-NEXT: mtvsrd v21, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v22
+; CHECK-NEXT: xxmrghd v27, vs0, v21
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v25
+; CHECK-NEXT: mtvsrd v22, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v23
+; CHECK-NEXT: xxmrghd v25, vs0, v22
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v24
+; CHECK-NEXT: mtvsrd v23, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v26
+; CHECK-NEXT: xxmrghd v24, vs0, v23
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: vmr v2, v28
+; CHECK-NEXT: mtvsrd v26, r3
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v29
+; CHECK-NEXT: xxmrghd v28, vs0, v26
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtvsrd v29, r3
+; CHECK-NEXT: li r3, 64
+; CHECK-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: vmr v2, v31
+; CHECK-NEXT: xxmrghd v29, vs0, v29
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtvsrd v31, r3
+; CHECK-NEXT: li r3, 48
+; CHECK-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: li r3, 80
+; CHECK-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: xxmrghd v31, vs0, v31
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtvsrd v26, r3
+; CHECK-NEXT: li r3, 96
+; CHECK-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: bl lrintf128
+; CHECK-NEXT: nop
+; CHECK-NEXT: mtfprd f0, r3
+; CHECK-NEXT: li r3, 288
+; CHECK-NEXT: vmr v8, v31
+; CHECK-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 272
+; CHECK-NEXT: vmr v2, v30
+; CHECK-NEXT: vmr v7, v29
+; CHECK-NEXT: vmr v6, v28
+; CHECK-NEXT: vmr v3, v27
+; CHECK-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 256
+; CHECK-NEXT: vmr v4, v25
+; CHECK-NEXT: vmr v5, v24
+; CHECK-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 240
+; CHECK-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 224
+; CHECK-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 208
+; CHECK-NEXT: xxmrghd v9, vs0, v26
+; CHECK-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 192
+; CHECK-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 176
+; CHECK-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 160
+; CHECK-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 144
+; CHECK-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 128
+; CHECK-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: li r3, 112
+; CHECK-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
+; CHECK-NEXT: addi r1, r1, 304
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
+; CHECK-NEXT: blr
+;
+; FAST-LABEL: lrint_v16i64_v16f128:
+; FAST: # %bb.0:
+; FAST-NEXT: mflr r0
+; FAST-NEXT: stdu r1, -304(r1)
+; FAST-NEXT: li r3, 112
+; FAST-NEXT: std r0, 320(r1)
+; FAST-NEXT: stxvd2x v20, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 128
+; FAST-NEXT: stxvd2x v21, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 144
+; FAST-NEXT: vmr v21, v4
+; FAST-NEXT: stxvd2x v22, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 160
+; FAST-NEXT: vmr v22, v6
+; FAST-NEXT: stxvd2x v23, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 176
+; FAST-NEXT: vmr v23, v8
+; FAST-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 192
+; FAST-NEXT: vmr v24, v9
+; FAST-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 208
+; FAST-NEXT: vmr v25, v7
+; FAST-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 224
+; FAST-NEXT: vmr v26, v10
+; FAST-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 240
+; FAST-NEXT: vmr v27, v5
+; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 256
+; FAST-NEXT: vmr v28, v11
+; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 272
+; FAST-NEXT: vmr v29, v12
+; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 288
+; FAST-NEXT: vmr v30, v3
+; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: stxvd2x v13, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: addi r3, r1, 576
+; FAST-NEXT: lxvd2x vs0, 0, r3
+; FAST-NEXT: addi r3, r1, 560
+; FAST-NEXT: lxvd2x vs1, 0, r3
+; FAST-NEXT: addi r3, r1, 544
+; FAST-NEXT: lxvd2x vs2, 0, r3
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: xxswapd vs0, vs0
+; FAST-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: xxswapd vs0, vs1
+; FAST-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: xxswapd vs0, vs2
+; FAST-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; FAST-NEXT: addi r3, r1, 528
+; FAST-NEXT: lxvd2x vs0, 0, r3
+; FAST-NEXT: xxswapd v31, vs0
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v30
+; FAST-NEXT: mtvsrd v20, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v21
+; FAST-NEXT: xxmrghd v30, vs0, v20
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v27
+; FAST-NEXT: mtvsrd v21, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v22
+; FAST-NEXT: xxmrghd v27, vs0, v21
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v25
+; FAST-NEXT: mtvsrd v22, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v23
+; FAST-NEXT: xxmrghd v25, vs0, v22
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v24
+; FAST-NEXT: mtvsrd v23, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v26
+; FAST-NEXT: xxmrghd v24, vs0, v23
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: vmr v2, v28
+; FAST-NEXT: mtvsrd v26, r3
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v29
+; FAST-NEXT: xxmrghd v28, vs0, v26
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtvsrd v29, r3
+; FAST-NEXT: li r3, 64
+; FAST-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: vmr v2, v31
+; FAST-NEXT: xxmrghd v29, vs0, v29
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtvsrd v31, r3
+; FAST-NEXT: li r3, 48
+; FAST-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: li r3, 80
+; FAST-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: xxmrghd v31, vs0, v31
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtvsrd v26, r3
+; FAST-NEXT: li r3, 96
+; FAST-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: bl lrintf128
+; FAST-NEXT: nop
+; FAST-NEXT: mtfprd f0, r3
+; FAST-NEXT: li r3, 288
+; FAST-NEXT: vmr v8, v31
+; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 272
+; FAST-NEXT: vmr v2, v30
+; FAST-NEXT: vmr v7, v29
+; FAST-NEXT: vmr v6, v28
+; FAST-NEXT: vmr v3, v27
+; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 256
+; FAST-NEXT: vmr v4, v25
+; FAST-NEXT: vmr v5, v24
+; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 240
+; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 224
+; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 208
+; FAST-NEXT: xxmrghd v9, vs0, v26
+; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 192
+; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 176
+; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 160
+; FAST-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 144
+; FAST-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 128
+; FAST-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: li r3, 112
+; FAST-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
+; FAST-NEXT: addi r1, r1, 304
+; FAST-NEXT: ld r0, 16(r1)
+; FAST-NEXT: mtlr r0
+; FAST-NEXT: blr
+ %a = call <16 x i64> @llvm.lrint.v16i64.v16f128(<16 x fp128> %x)
+ ret <16 x i64> %a
+}
+declare <16 x i64> @llvm.lrint.v16i64.v16f128(<16 x fp128>)
diff --git a/llvm/test/CodeGen/PowerPC/vsro-vsr-vsrq-dag-combine.ll b/llvm/test/CodeGen/PowerPC/vsro-vsr-vsrq-dag-combine.ll
new file mode 100644
index 0000000..afbdae6
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vsro-vsr-vsrq-dag-combine.ll
@@ -0,0 +1,337 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWER10-LE
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64-ibm-aix-xcoff \
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWER10-BE
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc-ibm-aix-xcoff \
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWER3210-BE
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWER9-LE
+
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64-ibm-aix-xcoff \
+; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWER9-BE
+
+; Test the VSRO + VSR peephole optimization to VSRQ on Power10 and later.
+; Consecutive VSRO (Vector Shift Right by Octet) and VSR (Vector Shift Right)
+; instructions using the same shift amount should be combined into a single
+; VSRQ (Vector Shift Right Quadword) instruction on Power10 or later.
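+;
+; Illustrative sketch of the pattern being matched (taken from the tests
+; below, not an additional RUN test): the shift amount is splatted into a
+; byte vector %1 and fed to both intrinsics on the same input %0:
+;   %2 = tail call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %0, <4 x i32> %1)
+;   %3 = tail call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %2, <4 x i32> %1)
+; VSRO shifts right by whole octets (sh >> 3) and VSR by the remaining bits
+; (sh & 7), so the pair performs a full 128-bit right shift by sh & 127,
+; which is what a single VSRQ computes on Power10.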
+declare <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32>, <4 x i32>)
+
+define <16 x i8> @shiftright128_v16i8(<16 x i8> %in, i8 zeroext %sh) {
+; POWER10-LE-LABEL: shiftright128_v16i8:
+; POWER10-LE: # %bb.0: # %entry
+; POWER10-LE-NEXT: mtvsrd v3, r5
+; POWER10-LE-NEXT: vspltb v3, v3, 7
+; POWER10-LE-NEXT: vsrq v2, v2, v3
+; POWER10-LE-NEXT: blr
+;
+; POWER10-BE-LABEL: shiftright128_v16i8:
+; POWER10-BE: # %bb.0: # %entry
+; POWER10-BE-NEXT: mtvsrwz v3, r3
+; POWER10-BE-NEXT: vspltb v3, v3, 7
+; POWER10-BE-NEXT: vsrq v2, v2, v3
+; POWER10-BE-NEXT: blr
+;
+; POWER3210-BE-LABEL: shiftright128_v16i8:
+; POWER3210-BE: # %bb.0: # %entry
+; POWER3210-BE-NEXT: mtvsrwz v3, r3
+; POWER3210-BE-NEXT: vspltb v3, v3, 7
+; POWER3210-BE-NEXT: vsrq v2, v2, v3
+; POWER3210-BE-NEXT: blr
+;
+; POWER9-LE-LABEL: shiftright128_v16i8:
+; POWER9-LE: # %bb.0: # %entry
+; POWER9-LE-NEXT: mtvsrd v3, r5
+; POWER9-LE-NEXT: vspltb v3, v3, 7
+; POWER9-LE-NEXT: vsro v2, v2, v3
+; POWER9-LE-NEXT: vsr v2, v2, v3
+; POWER9-LE-NEXT: blr
+;
+; POWER9-BE-LABEL: shiftright128_v16i8:
+; POWER9-BE: # %bb.0: # %entry
+; POWER9-BE-NEXT: mtvsrwz v3, r3
+; POWER9-BE-NEXT: vspltb v3, v3, 7
+; POWER9-BE-NEXT: vsro v2, v2, v3
+; POWER9-BE-NEXT: vsr v2, v2, v3
+; POWER9-BE-NEXT: blr
+entry:
+ %splat.splatinsert.i = insertelement <16 x i8> poison, i8 %sh, i64 0
+ %splat.splat.i = shufflevector <16 x i8> %splat.splatinsert.i, <16 x i8> poison, <16 x i32> zeroinitializer
+ %0 = bitcast <16 x i8> %in to <4 x i32>
+ %1 = bitcast <16 x i8> %splat.splat.i to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %0, <4 x i32> %1)
+ %3 = tail call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %2, <4 x i32> %1)
+ %4 = bitcast <4 x i32> %3 to <16 x i8>
+ ret <16 x i8> %4
+}
+
+define <4 x i32> @shiftright128_v4i32(<4 x i32> %in, i8 zeroext %sh) {
+; POWER10-LE-LABEL: shiftright128_v4i32:
+; POWER10-LE: # %bb.0: # %entry
+; POWER10-LE-NEXT: mtvsrd v3, r5
+; POWER10-LE-NEXT: vspltb v3, v3, 7
+; POWER10-LE-NEXT: vsrq v2, v2, v3
+; POWER10-LE-NEXT: blr
+;
+; POWER10-BE-LABEL: shiftright128_v4i32:
+; POWER10-BE: # %bb.0: # %entry
+; POWER10-BE-NEXT: mtvsrwz v3, r3
+; POWER10-BE-NEXT: vspltb v3, v3, 7
+; POWER10-BE-NEXT: vsrq v2, v2, v3
+; POWER10-BE-NEXT: blr
+;
+; POWER3210-BE-LABEL: shiftright128_v4i32:
+; POWER3210-BE: # %bb.0: # %entry
+; POWER3210-BE-NEXT: mtvsrwz v3, r3
+; POWER3210-BE-NEXT: vspltb v3, v3, 7
+; POWER3210-BE-NEXT: vsrq v2, v2, v3
+; POWER3210-BE-NEXT: blr
+;
+; POWER9-LE-LABEL: shiftright128_v4i32:
+; POWER9-LE: # %bb.0: # %entry
+; POWER9-LE-NEXT: mtvsrd v3, r5
+; POWER9-LE-NEXT: vspltb v3, v3, 7
+; POWER9-LE-NEXT: vsro v2, v2, v3
+; POWER9-LE-NEXT: vsr v2, v2, v3
+; POWER9-LE-NEXT: blr
+;
+; POWER9-BE-LABEL: shiftright128_v4i32:
+; POWER9-BE: # %bb.0: # %entry
+; POWER9-BE-NEXT: mtvsrwz v3, r3
+; POWER9-BE-NEXT: vspltb v3, v3, 7
+; POWER9-BE-NEXT: vsro v2, v2, v3
+; POWER9-BE-NEXT: vsr v2, v2, v3
+; POWER9-BE-NEXT: blr
+entry:
+ %splat.splatinsert.i = insertelement <16 x i8> poison, i8 %sh, i64 0
+ %splat.splat.i = shufflevector <16 x i8> %splat.splatinsert.i, <16 x i8> poison, <16 x i32> zeroinitializer
+ %0 = bitcast <16 x i8> %splat.splat.i to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %in, <4 x i32> %0)
+ %2 = tail call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %1, <4 x i32> %0)
+ ret <4 x i32> %2
+}
+
+define <2 x i64> @shiftright128_v2i64(<2 x i64> %in, i8 zeroext %sh) {
+; POWER10-LE-LABEL: shiftright128_v2i64:
+; POWER10-LE: # %bb.0: # %entry
+; POWER10-LE-NEXT: mtvsrd v3, r5
+; POWER10-LE-NEXT: vspltb v3, v3, 7
+; POWER10-LE-NEXT: vsrq v2, v2, v3
+; POWER10-LE-NEXT: blr
+;
+; POWER10-BE-LABEL: shiftright128_v2i64:
+; POWER10-BE: # %bb.0: # %entry
+; POWER10-BE-NEXT: mtvsrwz v3, r3
+; POWER10-BE-NEXT: vspltb v3, v3, 7
+; POWER10-BE-NEXT: vsrq v2, v2, v3
+; POWER10-BE-NEXT: blr
+;
+; POWER3210-BE-LABEL: shiftright128_v2i64:
+; POWER3210-BE: # %bb.0: # %entry
+; POWER3210-BE-NEXT: mtvsrwz v3, r3
+; POWER3210-BE-NEXT: vspltb v3, v3, 7
+; POWER3210-BE-NEXT: vsrq v2, v2, v3
+; POWER3210-BE-NEXT: blr
+;
+; POWER9-LE-LABEL: shiftright128_v2i64:
+; POWER9-LE: # %bb.0: # %entry
+; POWER9-LE-NEXT: mtvsrd v3, r5
+; POWER9-LE-NEXT: vspltb v3, v3, 7
+; POWER9-LE-NEXT: vsro v2, v2, v3
+; POWER9-LE-NEXT: vsr v2, v2, v3
+; POWER9-LE-NEXT: blr
+;
+; POWER9-BE-LABEL: shiftright128_v2i64:
+; POWER9-BE: # %bb.0: # %entry
+; POWER9-BE-NEXT: mtvsrwz v3, r3
+; POWER9-BE-NEXT: vspltb v3, v3, 7
+; POWER9-BE-NEXT: vsro v2, v2, v3
+; POWER9-BE-NEXT: vsr v2, v2, v3
+; POWER9-BE-NEXT: blr
+entry:
+ %splat.splatinsert.i = insertelement <16 x i8> poison, i8 %sh, i64 0
+ %splat.splat.i = shufflevector <16 x i8> %splat.splatinsert.i, <16 x i8> poison, <16 x i32> zeroinitializer
+ %0 = bitcast <2 x i64> %in to <4 x i32>
+ %1 = bitcast <16 x i8> %splat.splat.i to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %0, <4 x i32> %1)
+ %3 = tail call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %2, <4 x i32> %1)
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <8 x i16> @shiftright128_v8i16(<8 x i16> %in, i8 zeroext %sh) {
+; POWER10-LE-LABEL: shiftright128_v8i16:
+; POWER10-LE: # %bb.0: # %entry
+; POWER10-LE-NEXT: mtvsrd v3, r5
+; POWER10-LE-NEXT: vspltb v3, v3, 7
+; POWER10-LE-NEXT: vsrq v2, v2, v3
+; POWER10-LE-NEXT: blr
+;
+; POWER10-BE-LABEL: shiftright128_v8i16:
+; POWER10-BE: # %bb.0: # %entry
+; POWER10-BE-NEXT: mtvsrwz v3, r3
+; POWER10-BE-NEXT: vspltb v3, v3, 7
+; POWER10-BE-NEXT: vsrq v2, v2, v3
+; POWER10-BE-NEXT: blr
+;
+; POWER3210-BE-LABEL: shiftright128_v8i16:
+; POWER3210-BE: # %bb.0: # %entry
+; POWER3210-BE-NEXT: mtvsrwz v3, r3
+; POWER3210-BE-NEXT: vspltb v3, v3, 7
+; POWER3210-BE-NEXT: vsrq v2, v2, v3
+; POWER3210-BE-NEXT: blr
+;
+; POWER9-LE-LABEL: shiftright128_v8i16:
+; POWER9-LE: # %bb.0: # %entry
+; POWER9-LE-NEXT: mtvsrd v3, r5
+; POWER9-LE-NEXT: vspltb v3, v3, 7
+; POWER9-LE-NEXT: vsro v2, v2, v3
+; POWER9-LE-NEXT: vsr v2, v2, v3
+; POWER9-LE-NEXT: blr
+;
+; POWER9-BE-LABEL: shiftright128_v8i16:
+; POWER9-BE: # %bb.0: # %entry
+; POWER9-BE-NEXT: mtvsrwz v3, r3
+; POWER9-BE-NEXT: vspltb v3, v3, 7
+; POWER9-BE-NEXT: vsro v2, v2, v3
+; POWER9-BE-NEXT: vsr v2, v2, v3
+; POWER9-BE-NEXT: blr
+entry:
+ %splat.splatinsert.i = insertelement <16 x i8> poison, i8 %sh, i64 0
+ %splat.splat.i = shufflevector <16 x i8> %splat.splatinsert.i, <16 x i8> poison, <16 x i32> zeroinitializer
+ %0 = bitcast <8 x i16> %in to <4 x i32>
+ %1 = bitcast <16 x i8> %splat.splat.i to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %0, <4 x i32> %1)
+ %3 = tail call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %2, <4 x i32> %1)
+ %4 = bitcast <4 x i32> %3 to <8 x i16>
+ ret <8 x i16> %4
+}
+
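+; The vsro/vsr pair implements a full 128-bit logical shift right: vsro
+; shifts the quadword right by (%sh / 8) mod 16 octets and vsr by a further
+; %sh mod 8 bits, so together they shift right by %sh mod 128 bits (e.g.
+; %sh = 20 gives 2 octets plus 4 bits). Power10's vsrq performs the same
+; 0-127 bit shift in one instruction, which is the combine the POWER10
+; check lines verify. The tests below cover cases where the fold is unsound.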
+; Test case with different shift amounts (should not optimize - vsro and vsr take different splatted shift-amount vectors)
+define <16 x i8> @no_optimization_different_shifts(<16 x i8> %in, i8 zeroext %sh1, i8 zeroext %sh2) {
+; POWER10-LE-LABEL: no_optimization_different_shifts:
+; POWER10-LE: # %bb.0: # %entry
+; POWER10-LE-NEXT: mtvsrd v3, r5
+; POWER10-LE-NEXT: mtvsrd v4, r6
+; POWER10-LE-NEXT: vspltb v3, v3, 7
+; POWER10-LE-NEXT: vspltb v4, v4, 7
+; POWER10-LE-NEXT: vsro v2, v2, v3
+; POWER10-LE-NEXT: vsr v2, v2, v4
+; POWER10-LE-NEXT: blr
+;
+; POWER10-BE-LABEL: no_optimization_different_shifts:
+; POWER10-BE: # %bb.0: # %entry
+; POWER10-BE-NEXT: mtvsrwz v3, r3
+; POWER10-BE-NEXT: mtvsrwz v4, r4
+; POWER10-BE-NEXT: vspltb v3, v3, 7
+; POWER10-BE-NEXT: vspltb v4, v4, 7
+; POWER10-BE-NEXT: vsro v2, v2, v3
+; POWER10-BE-NEXT: vsr v2, v2, v4
+; POWER10-BE-NEXT: blr
+;
+; POWER3210-BE-LABEL: no_optimization_different_shifts:
+; POWER3210-BE: # %bb.0: # %entry
+; POWER3210-BE-NEXT: mtvsrwz v3, r3
+; POWER3210-BE-NEXT: mtvsrwz v4, r4
+; POWER3210-BE-NEXT: vspltb v3, v3, 7
+; POWER3210-BE-NEXT: vspltb v4, v4, 7
+; POWER3210-BE-NEXT: vsro v2, v2, v3
+; POWER3210-BE-NEXT: vsr v2, v2, v4
+; POWER3210-BE-NEXT: blr
+;
+; POWER9-LE-LABEL: no_optimization_different_shifts:
+; POWER9-LE: # %bb.0: # %entry
+; POWER9-LE-NEXT: mtvsrd v3, r5
+; POWER9-LE-NEXT: mtvsrd v4, r6
+; POWER9-LE-NEXT: vspltb v3, v3, 7
+; POWER9-LE-NEXT: vspltb v4, v4, 7
+; POWER9-LE-NEXT: vsro v2, v2, v3
+; POWER9-LE-NEXT: vsr v2, v2, v4
+; POWER9-LE-NEXT: blr
+;
+; POWER9-BE-LABEL: no_optimization_different_shifts:
+; POWER9-BE: # %bb.0: # %entry
+; POWER9-BE-NEXT: mtvsrwz v3, r3
+; POWER9-BE-NEXT: mtvsrwz v4, r4
+; POWER9-BE-NEXT: vspltb v3, v3, 7
+; POWER9-BE-NEXT: vspltb v4, v4, 7
+; POWER9-BE-NEXT: vsro v2, v2, v3
+; POWER9-BE-NEXT: vsr v2, v2, v4
+; POWER9-BE-NEXT: blr
+entry:
+ %splat.splatinsert.i = insertelement <16 x i8> poison, i8 %sh1, i64 0
+ %splat.splat.i = shufflevector <16 x i8> %splat.splatinsert.i, <16 x i8> poison, <16 x i32> zeroinitializer
+ %splat.splatinsert.i2 = insertelement <16 x i8> poison, i8 %sh2, i64 0
+ %splat.splat.i2 = shufflevector <16 x i8> %splat.splatinsert.i2, <16 x i8> poison, <16 x i32> zeroinitializer
+ %0 = bitcast <16 x i8> %in to <4 x i32>
+ %1 = bitcast <16 x i8> %splat.splat.i to <4 x i32>
+ %2 = bitcast <16 x i8> %splat.splat.i2 to <4 x i32>
+ %3 = tail call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %0, <4 x i32> %1)
+ %4 = tail call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %3, <4 x i32> %2)
+ %5 = bitcast <4 x i32> %4 to <16 x i8>
+ ret <16 x i8> %5
+}
+
+; Test case with multiple uses of the VSRO result (should not optimize - the intermediate VSRO value is still needed by the add)
+define <16 x i8> @no_optimization_multiple_uses(<16 x i8> %in, i8 zeroext %sh) {
+; POWER10-LE-LABEL: no_optimization_multiple_uses:
+; POWER10-LE: # %bb.0: # %entry
+; POWER10-LE-NEXT: mtvsrd v3, r5
+; POWER10-LE-NEXT: vspltb v3, v3, 7
+; POWER10-LE-NEXT: vsro v2, v2, v3
+; POWER10-LE-NEXT: vsr v3, v2, v3
+; POWER10-LE-NEXT: vaddubm v2, v2, v3
+; POWER10-LE-NEXT: blr
+;
+; POWER10-BE-LABEL: no_optimization_multiple_uses:
+; POWER10-BE: # %bb.0: # %entry
+; POWER10-BE-NEXT: mtvsrwz v3, r3
+; POWER10-BE-NEXT: vspltb v3, v3, 7
+; POWER10-BE-NEXT: vsro v2, v2, v3
+; POWER10-BE-NEXT: vsr v3, v2, v3
+; POWER10-BE-NEXT: vaddubm v2, v2, v3
+; POWER10-BE-NEXT: blr
+;
+; POWER3210-BE-LABEL: no_optimization_multiple_uses:
+; POWER3210-BE: # %bb.0: # %entry
+; POWER3210-BE-NEXT: mtvsrwz v3, r3
+; POWER3210-BE-NEXT: vspltb v3, v3, 7
+; POWER3210-BE-NEXT: vsro v2, v2, v3
+; POWER3210-BE-NEXT: vsr v3, v2, v3
+; POWER3210-BE-NEXT: vaddubm v2, v2, v3
+; POWER3210-BE-NEXT: blr
+;
+; POWER9-LE-LABEL: no_optimization_multiple_uses:
+; POWER9-LE: # %bb.0: # %entry
+; POWER9-LE-NEXT: mtvsrd v3, r5
+; POWER9-LE-NEXT: vspltb v3, v3, 7
+; POWER9-LE-NEXT: vsro v2, v2, v3
+; POWER9-LE-NEXT: vsr v3, v2, v3
+; POWER9-LE-NEXT: vaddubm v2, v2, v3
+; POWER9-LE-NEXT: blr
+;
+; POWER9-BE-LABEL: no_optimization_multiple_uses:
+; POWER9-BE: # %bb.0: # %entry
+; POWER9-BE-NEXT: mtvsrwz v3, r3
+; POWER9-BE-NEXT: vspltb v3, v3, 7
+; POWER9-BE-NEXT: vsro v2, v2, v3
+; POWER9-BE-NEXT: vsr v3, v2, v3
+; POWER9-BE-NEXT: vaddubm v2, v2, v3
+; POWER9-BE-NEXT: blr
+entry:
+ %splat.splatinsert.i = insertelement <16 x i8> poison, i8 %sh, i64 0
+ %splat.splat.i = shufflevector <16 x i8> %splat.splatinsert.i, <16 x i8> poison, <16 x i32> zeroinitializer
+ %0 = bitcast <16 x i8> %in to <4 x i32>
+ %1 = bitcast <16 x i8> %splat.splat.i to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %0, <4 x i32> %1)
+ %3 = tail call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %2, <4 x i32> %1)
+ %4 = bitcast <4 x i32> %3 to <16 x i8>
+ %5 = bitcast <4 x i32> %2 to <16 x i8>
+ %6 = add <16 x i8> %5, %4
+ ret <16 x i8> %6
+}
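+
+; In summary, the vsro+vsr pair is rewritten to vsrq only when both
+; instructions use the same splatted shift-amount vector and the vsro result
+; has no other use; a sketch of the foldable pattern:
+;   %o = call <4 x i32> @llvm.ppc.altivec.vsro(<4 x i32> %v, <4 x i32> %amt)
+;   %r = call <4 x i32> @llvm.ppc.altivec.vsr(<4 x i32> %o, <4 x i32> %amt)
+; ==> vsrq on Power10, with %amt a splat of the shift byte.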
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
index c366fd5..a51e392 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-b.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
+; Test file to verify the emission of Vector Evaluation instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,10 +15,9 @@ define <4 x i32> @ternary_A_and_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_and_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
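+; xxeval evaluates a bitwise three-input boolean function: each result bit is
+; the IMM truth-table entry selected by the corresponding bits of the three
+; sources (here A = v2, B = v3, C = v4). Tabulating "A ? (B & C) : B" for
+; (A,B,C) = 000..111, MSB first, gives 0b00110001 = 49, the immediate
+; checked above.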
@@ -31,11 +30,10 @@ define <2 x i64> @ternary_A_and_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_and_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -48,10 +46,9 @@ define <16 x i8> @ternary_A_and_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_and_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -64,10 +61,9 @@ define <8 x i16> @ternary_A_and_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_and_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 49
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -80,10 +76,9 @@ define <4 x i32> @ternary_A_nor_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_nor_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -97,11 +92,10 @@ define <2 x i64> @ternary_A_nor_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_nor_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -115,10 +109,9 @@ define <16 x i8> @ternary_A_nor_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nor_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -132,10 +125,9 @@ define <8 x i16> @ternary_A_nor_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_nor_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 56
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -149,10 +141,9 @@ define <4 x i32> @ternary_A_eqv_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_eqv_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -166,11 +157,10 @@ define <2 x i64> @ternary_A_eqv_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_eqv_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -184,10 +174,9 @@ define <16 x i8> @ternary_A_eqv_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_eqv_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -201,10 +190,9 @@ define <8 x i16> @ternary_A_eqv_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_eqv_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 57
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -218,10 +206,9 @@ define <4 x i32> @ternary_A_nand_BC_B_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32>
; CHECK-LABEL: ternary_A_nand_BC_B_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -235,11 +222,10 @@ define <2 x i64> @ternary_A_nand_BC_B_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64>
; CHECK-LABEL: ternary_A_nand_BC_B_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -253,10 +239,9 @@ define <16 x i8> @ternary_A_nand_BC_B_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nand_BC_B_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -270,10 +255,9 @@ define <8 x i16> @ternary_A_nand_BC_B_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16>
; CHECK-LABEL: ternary_A_nand_BC_B_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v3, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 62
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
diff --git a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
index f70f1d0..54bf6c0 100644
--- a/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
+++ b/llvm/test/CodeGen/PowerPC/xxeval-vselect-x-c.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; Test file to verify the emission of Vector Selection instructions when ternary operators are used.
+; Test file to verify the emission of Vector Evaluation instructions when ternary operators are used.
; RUN: llc -verify-machineinstrs -mcpu=pwr10 -mtriple=powerpc64le-unknown-unknown \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s
@@ -15,10 +15,9 @@ define <4 x i32> @ternary_A_and_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_and_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
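+; Relative to the xxeval-vselect-x-b.ll immediates (49/56/57/62), only the
+; A=0 half of the truth table changes: the false operand is C rather than B,
+; so "A ? (B & C) : C" tabulates to 0b01010001 = 81, and the nor/eqv/nand
+; forms become 88, 89 and 94 by the same substitution.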
@@ -31,11 +30,10 @@ define <2 x i64> @ternary_A_and_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_and_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -48,10 +46,9 @@ define <16 x i8> @ternary_A_and_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_and_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -64,10 +61,9 @@ define <8 x i16> @ternary_A_and_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_and_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxland vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 81
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C
@@ -80,10 +76,9 @@ define <4 x i32> @ternary_A_nor_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_nor_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
; CHECK-NEXT: blr
entry:
%or = or <4 x i32> %B, %C
@@ -97,11 +92,10 @@ define <2 x i64> @ternary_A_nor_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_nor_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
; CHECK-NEXT: blr
entry:
%or = or <2 x i64> %B, %C
@@ -115,10 +109,9 @@ define <16 x i8> @ternary_A_nor_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nor_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
; CHECK-NEXT: blr
entry:
%or = or <16 x i8> %B, %C
@@ -132,10 +125,9 @@ define <8 x i16> @ternary_A_nor_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_nor_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnor vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 88
; CHECK-NEXT: blr
entry:
%or = or <8 x i16> %B, %C
@@ -149,10 +141,9 @@ define <4 x i32> @ternary_A_eqv_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32> %
; CHECK-LABEL: ternary_A_eqv_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
; CHECK-NEXT: blr
entry:
%xor = xor <4 x i32> %B, %C
@@ -166,11 +157,10 @@ define <2 x i64> @ternary_A_eqv_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64> %
; CHECK-LABEL: ternary_A_eqv_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
; CHECK-NEXT: blr
entry:
%xor = xor <2 x i64> %B, %C
@@ -184,10 +174,9 @@ define <16 x i8> @ternary_A_eqv_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_eqv_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
; CHECK-NEXT: blr
entry:
%xor = xor <16 x i8> %B, %C
@@ -201,10 +190,9 @@ define <8 x i16> @ternary_A_eqv_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16> %
; CHECK-LABEL: ternary_A_eqv_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxleqv vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 89
; CHECK-NEXT: blr
entry:
%xor = xor <8 x i16> %B, %C
@@ -218,10 +206,9 @@ define <4 x i32> @ternary_A_nand_BC_C_4x32(<4 x i1> %A, <4 x i32> %B, <4 x i32>
; CHECK-LABEL: ternary_A_nand_BC_C_4x32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxleqv v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslw v2, v2, v5
; CHECK-NEXT: vsraw v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
; CHECK-NEXT: blr
entry:
%and = and <4 x i32> %B, %C
@@ -235,11 +222,10 @@ define <2 x i64> @ternary_A_nand_BC_C_2x64(<2 x i1> %A, <2 x i64> %B, <2 x i64>
; CHECK-LABEL: ternary_A_nand_BC_C_2x64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxlxor v5, v5, v5
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: xxsplti32dx v5, 1, 63
; CHECK-NEXT: vsld v2, v2, v5
; CHECK-NEXT: vsrad v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
; CHECK-NEXT: blr
entry:
%and = and <2 x i64> %B, %C
@@ -253,10 +239,9 @@ define <16 x i8> @ternary_A_nand_BC_C_16x8(<16 x i1> %A, <16 x i8> %B, <16 x i8>
; CHECK-LABEL: ternary_A_nand_BC_C_16x8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltib v5, 7
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslb v2, v2, v5
; CHECK-NEXT: vsrab v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
; CHECK-NEXT: blr
entry:
%and = and <16 x i8> %B, %C
@@ -270,10 +255,9 @@ define <8 x i16> @ternary_A_nand_BC_C_8x16(<8 x i1> %A, <8 x i16> %B, <8 x i16>
; CHECK-LABEL: ternary_A_nand_BC_C_8x16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xxspltiw v5, 983055
-; CHECK-NEXT: xxlnand vs0, v3, v4
; CHECK-NEXT: vslh v2, v2, v5
; CHECK-NEXT: vsrah v2, v2, v5
-; CHECK-NEXT: xxsel v2, v4, vs0, v2
+; CHECK-NEXT: xxeval v2, v2, v3, v4, 94
; CHECK-NEXT: blr
entry:
%and = and <8 x i16> %B, %C