Diffstat (limited to 'llvm/test/CodeGen/Mips')
-rw-r--r--  llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir  |   21
-rw-r--r--  llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir                        |   66
-rw-r--r--  llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir    |  862
-rw-r--r--  llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir    |  862
-rw-r--r--  llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir                       |   44
-rw-r--r--  llvm/test/CodeGen/Mips/abiflags-soft-float.ll                                   |   12
-rw-r--r--  llvm/test/CodeGen/Mips/llvm.frexp.ll                                            |  651
-rw-r--r--  llvm/test/CodeGen/Mips/llvm.sincos.ll                                           | 1044
-rw-r--r--  llvm/test/CodeGen/Mips/nan_lowering.ll                                          |   25
-rw-r--r--  llvm/test/CodeGen/Mips/qnan.ll                                                  |   14
10 files changed, 2761 insertions, 840 deletions
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
index 3d6a243..22fd220 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/inline-memcpy.mir
@@ -40,16 +40,17 @@ body: |
; MIPS32-LABEL: name: test_memcpy_inline
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
- ; MIPS32: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY1]](p0) :: (load (s8) from %ir.1, align 4)
- ; MIPS32: G_STORE [[LOAD]](s8), [[COPY]](p0) :: (store (s8) into %ir.0, align 4)
- ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s32)
- ; MIPS32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from %ir.1 + 1, basealign 4)
- ; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
- ; MIPS32: G_STORE [[LOAD1]](s8), [[PTR_ADD1]](p0) :: (store (s8) into %ir.0 + 1, basealign 4)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY1]](p0) :: (load (s8) from %ir.1, align 4)
+ ; MIPS32-NEXT: G_STORE [[LOAD]](s8), [[COPY]](p0) :: (store (s8) into %ir.0, align 4)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s32)
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from %ir.1 + 1, basealign 4)
+ ; MIPS32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: G_STORE [[LOAD1]](s8), [[PTR_ADD1]](p0) :: (store (s8) into %ir.0 + 1, basealign 4)
+ ; MIPS32-NEXT: RetRA
%0:_(p0) = COPY $a0
%1:_(p0) = COPY $a1
%2:_(s64) = G_CONSTANT i64 2
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
index ef607c1..f3c7208 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
@@ -21,10 +21,11 @@ body: |
; MIPS32-LABEL: name: load_i32
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
- ; MIPS32: $v0 = COPY [[LOAD]](s32)
- ; MIPS32: RetRA implicit $v0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
+ ; MIPS32-NEXT: $v0 = COPY [[LOAD]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0
%0:_(p0) = COPY $a0
%1:_(s32) = G_LOAD %0(p0) :: (load (s32) from %ir.ptr)
$v0 = COPY %1(s32)
@@ -42,14 +43,15 @@ body: |
; MIPS32-LABEL: name: load_i64
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.ptr + 4, basealign 8)
- ; MIPS32: $v0 = COPY [[LOAD]](s32)
- ; MIPS32: $v1 = COPY [[LOAD1]](s32)
- ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.ptr + 4, basealign 8)
+ ; MIPS32-NEXT: $v0 = COPY [[LOAD]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[LOAD1]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%0:_(p0) = COPY $a0
%1:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
%2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %1(s64)
@@ -69,11 +71,12 @@ body: |
; MIPS32-LABEL: name: load_ambiguous_i64_in_fpr
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
- ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.i64_ptr_a)
- ; MIPS32: G_STORE [[LOAD]](s64), [[COPY1]](p0) :: (store (s64) into %ir.i64_ptr_b)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.i64_ptr_a)
+ ; MIPS32-NEXT: G_STORE [[LOAD]](s64), [[COPY1]](p0) :: (store (s64) into %ir.i64_ptr_b)
+ ; MIPS32-NEXT: RetRA
%0:_(p0) = COPY $a0
%1:_(p0) = COPY $a1
%2:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.i64_ptr_a)
@@ -92,10 +95,11 @@ body: |
; MIPS32-LABEL: name: load_float
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
- ; MIPS32: $f0 = COPY [[LOAD]](s32)
- ; MIPS32: RetRA implicit $f0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
+ ; MIPS32-NEXT: $f0 = COPY [[LOAD]](s32)
+ ; MIPS32-NEXT: RetRA implicit $f0
%0:_(p0) = COPY $a0
%1:_(s32) = G_LOAD %0(p0) :: (load (s32) from %ir.ptr)
$f0 = COPY %1(s32)
@@ -113,11 +117,12 @@ body: |
; MIPS32-LABEL: name: load_ambiguous_float_in_gpr
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.float_ptr_a)
- ; MIPS32: G_STORE [[LOAD]](s32), [[COPY1]](p0) :: (store (s32) into %ir.float_ptr_b)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.float_ptr_a)
+ ; MIPS32-NEXT: G_STORE [[LOAD]](s32), [[COPY1]](p0) :: (store (s32) into %ir.float_ptr_b)
+ ; MIPS32-NEXT: RetRA
%0:_(p0) = COPY $a0
%1:_(p0) = COPY $a1
%2:_(s32) = G_LOAD %0(p0) :: (load (s32) from %ir.float_ptr_a)
@@ -136,10 +141,11 @@ body: |
; MIPS32-LABEL: name: load_double
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
- ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.ptr)
- ; MIPS32: $d0 = COPY [[LOAD]](s64)
- ; MIPS32: RetRA implicit $d0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.ptr)
+ ; MIPS32-NEXT: $d0 = COPY [[LOAD]](s64)
+ ; MIPS32-NEXT: RetRA implicit $d0
%0:_(p0) = COPY $a0
%1:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
$d0 = COPY %1(s64)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
index 4226f2b..c79cc00 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
@@ -251,93 +251,117 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_ambiguous_i64_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -443,127 +467,151 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_i64_in_gpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C4]](s32)
- ; MIPS32: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD1:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C5]](s32)
- ; MIPS32: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.c, align 8)
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD2:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD1]], [[C6]](s32)
- ; MIPS32: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.c + 4, basealign 8)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
- ; MIPS32: [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
- ; MIPS32: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD3:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C8]](s32)
- ; MIPS32: G_STORE [[PHI1]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
- ; MIPS32: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD4:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C10]](s32)
- ; MIPS32: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
- ; MIPS32: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD5:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C11]](s32)
- ; MIPS32: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
- ; MIPS32: [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
- ; MIPS32: [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD6:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C13]](s32)
- ; MIPS32: G_STORE [[PHI3]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
- ; MIPS32: [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
- ; MIPS32: [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
- ; MIPS32: [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
- ; MIPS32: [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
- ; MIPS32: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
- ; MIPS32: [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
- ; MIPS32: [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
- ; MIPS32: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD7:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C15]](s32)
- ; MIPS32: G_STORE [[SELECT3]](s32), [[PTR_ADD7]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD8:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C16]](s32)
- ; MIPS32: G_STORE [[PHI5]](s32), [[PTR_ADD8]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[COPY3]], [[C4]](s32)
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD1:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD]], [[C5]](s32)
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.c, align 8)
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD2:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD1]], [[C6]](s32)
+ ; MIPS32-NEXT: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.c + 4, basealign 8)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD3:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD2]], [[C8]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
+ ; MIPS32-NEXT: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD4:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[COPY3]], [[C10]](s32)
+ ; MIPS32-NEXT: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
+ ; MIPS32-NEXT: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD5:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD]], [[C11]](s32)
+ ; MIPS32-NEXT: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
+ ; MIPS32-NEXT: [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD6:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD2]], [[C13]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI3]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
+ ; MIPS32-NEXT: [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
+ ; MIPS32-NEXT: [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
+ ; MIPS32-NEXT: [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
+ ; MIPS32-NEXT: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD7:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD2]], [[C15]](s32)
+ ; MIPS32-NEXT: G_STORE [[SELECT3]](s32), [[PTR_ADD7]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD8:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD2]], [[C16]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI5]](s32), [[PTR_ADD8]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -671,93 +719,117 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_ambiguous_double_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -863,94 +935,118 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_double_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
- ; MIPS32: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
index 4226f2b..c79cc00 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
@@ -251,93 +251,117 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_ambiguous_i64_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -443,127 +467,151 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_i64_in_gpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C4]](s32)
- ; MIPS32: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD1:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C5]](s32)
- ; MIPS32: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.c, align 8)
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD2:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD1]], [[C6]](s32)
- ; MIPS32: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.c + 4, basealign 8)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
- ; MIPS32: [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
- ; MIPS32: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD3:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C8]](s32)
- ; MIPS32: G_STORE [[PHI1]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
- ; MIPS32: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD4:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C10]](s32)
- ; MIPS32: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
- ; MIPS32: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD5:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C11]](s32)
- ; MIPS32: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
- ; MIPS32: [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
- ; MIPS32: [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD6:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C13]](s32)
- ; MIPS32: G_STORE [[PHI3]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
- ; MIPS32: [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
- ; MIPS32: [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
- ; MIPS32: [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
- ; MIPS32: [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
- ; MIPS32: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
- ; MIPS32: [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
- ; MIPS32: [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
- ; MIPS32: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD7:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C15]](s32)
- ; MIPS32: G_STORE [[SELECT3]](s32), [[PTR_ADD7]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
- ; MIPS32: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD8:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C16]](s32)
- ; MIPS32: G_STORE [[PHI5]](s32), [[PTR_ADD8]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[COPY3]], [[C4]](s32)
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD1:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD]], [[C5]](s32)
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load (s32) from %ir.c, align 8)
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD2:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD1]], [[C6]](s32)
+ ; MIPS32-NEXT: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.c + 4, basealign 8)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD3:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD2]], [[C8]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load (s32) from %ir.a, align 8)
+ ; MIPS32-NEXT: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD4:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[COPY3]], [[C10]](s32)
+ ; MIPS32-NEXT: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load (s32) from %ir.a + 4, basealign 8)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.b, align 8)
+ ; MIPS32-NEXT: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD5:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD]], [[C11]](s32)
+ ; MIPS32-NEXT: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load (s32) from %ir.b + 4, basealign 8)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
+ ; MIPS32-NEXT: [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD6:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD2]], [[C13]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI3]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
+ ; MIPS32-NEXT: [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
+ ; MIPS32-NEXT: [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
+ ; MIPS32-NEXT: [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
+ ; MIPS32-NEXT: [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
+ ; MIPS32-NEXT: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD7:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD2]], [[C15]](s32)
+ ; MIPS32-NEXT: G_STORE [[SELECT3]](s32), [[PTR_ADD7]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store (s32) into %ir.result, align 8)
+ ; MIPS32-NEXT: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD8:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[LOAD2]], [[C16]](s32)
+ ; MIPS32-NEXT: G_STORE [[PHI5]](s32), [[PTR_ADD8]](p0) :: (store (s32) into %ir.result + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -671,93 +719,117 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_ambiguous_double_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
@@ -863,94 +935,118 @@ fixedStack:
body: |
; MIPS32-LABEL: name: long_chain_double_in_fpr
; MIPS32: bb.0.entry:
- ; MIPS32: successors: %bb.8(0x40000000), %bb.1(0x40000000)
- ; MIPS32: liveins: $a0, $a1, $a2, $a3
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
- ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
- ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
- ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
- ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
- ; MIPS32: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
- ; MIPS32: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
- ; MIPS32: G_BRCOND [[AND]](s32), %bb.8
- ; MIPS32: bb.1.pre.PHI.1:
- ; MIPS32: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; MIPS32: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
- ; MIPS32: G_BRCOND [[AND1]](s32), %bb.4
- ; MIPS32: bb.2.pre.PHI.1.0:
- ; MIPS32: successors: %bb.5(0x40000000), %bb.3(0x40000000)
- ; MIPS32: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
- ; MIPS32: G_BRCOND [[AND2]](s32), %bb.5
- ; MIPS32: bb.3.b.PHI.1.0:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.4.b.PHI.1.1:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: G_BR %bb.6
- ; MIPS32: bb.5.b.PHI.1.2:
- ; MIPS32: successors: %bb.6(0x80000000)
- ; MIPS32: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
- ; MIPS32: bb.6.b.PHI.1:
- ; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
- ; MIPS32: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
- ; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
- ; MIPS32: G_BRCOND [[AND3]](s32), %bb.7
- ; MIPS32: G_BR %bb.13
- ; MIPS32: bb.7.b.PHI.1.end:
- ; MIPS32: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.8.pre.PHI.2:
- ; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
- ; MIPS32: G_BRCOND [[AND4]](s32), %bb.9
- ; MIPS32: G_BR %bb.10
- ; MIPS32: bb.9.b.PHI.2.0:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
- ; MIPS32: G_BR %bb.11
- ; MIPS32: bb.10.b.PHI.2.1:
- ; MIPS32: successors: %bb.11(0x80000000)
- ; MIPS32: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
- ; MIPS32: bb.11.b.PHI.2:
- ; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
- ; MIPS32: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
- ; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
- ; MIPS32: G_BRCOND [[AND5]](s32), %bb.13
- ; MIPS32: bb.12.b.PHI.2.end:
- ; MIPS32: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
- ; MIPS32: bb.13.b.PHI.3:
- ; MIPS32: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
- ; MIPS32: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
- ; MIPS32: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
- ; MIPS32: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
- ; MIPS32: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
- ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
- ; MIPS32: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
- ; MIPS32: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
- ; MIPS32: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
- ; MIPS32: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: successors: %bb.8(0x40000000), %bb.1(0x40000000)
+ ; MIPS32-NEXT: liveins: $a0, $a1, $a2, $a3
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+ ; MIPS32-NEXT: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; MIPS32-NEXT: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load (p0) from %fixed-stack.0, align 8)
+ ; MIPS32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; MIPS32-NEXT: [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (p0) from %fixed-stack.1)
+ ; MIPS32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+ ; MIPS32-NEXT: [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (p0) from %fixed-stack.2, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+ ; MIPS32-NEXT: G_BRCOND [[AND]](s32), %bb.8
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.1.pre.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+ ; MIPS32-NEXT: G_BRCOND [[AND1]](s32), %bb.4
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.2.pre.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.5(0x40000000), %bb.3(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+ ; MIPS32-NEXT: G_BRCOND [[AND2]](s32), %bb.5
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.3.b.PHI.1.0:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.4.b.PHI.1.1:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: G_BR %bb.6
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.5.b.PHI.1.2:
+ ; MIPS32-NEXT: successors: %bb.6(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load (s64) from %ir.c)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.6.b.PHI.1:
+ ; MIPS32-NEXT: successors: %bb.7(0x40000000), %bb.13(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
+ ; MIPS32-NEXT: G_BRCOND [[AND3]](s32), %bb.7
+ ; MIPS32-NEXT: G_BR %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.7.b.PHI.1.end:
+ ; MIPS32-NEXT: G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.8.pre.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
+ ; MIPS32-NEXT: G_BRCOND [[AND4]](s32), %bb.9
+ ; MIPS32-NEXT: G_BR %bb.10
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.9.b.PHI.2.0:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load (s64) from %ir.a)
+ ; MIPS32-NEXT: G_BR %bb.11
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.10.b.PHI.2.1:
+ ; MIPS32-NEXT: successors: %bb.11(0x80000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load (s64) from %ir.b)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.11.b.PHI.2:
+ ; MIPS32-NEXT: successors: %bb.13(0x40000000), %bb.12(0x40000000)
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
+ ; MIPS32-NEXT: G_BRCOND [[AND5]](s32), %bb.13
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.12.b.PHI.2.end:
+ ; MIPS32-NEXT: G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: bb.13.b.PHI.3:
+ ; MIPS32-NEXT: [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+ ; MIPS32-NEXT: [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
+ ; MIPS32-NEXT: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+ ; MIPS32-NEXT: [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
+ ; MIPS32-NEXT: [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+ ; MIPS32-NEXT: G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store (s64) into %ir.result)
+ ; MIPS32-NEXT: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
index 80bf04a..e48e8e2 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
@@ -19,10 +19,11 @@ body: |
; MIPS32-LABEL: name: store_i32
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
- ; MIPS32: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32) into %ir.ptr)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+ ; MIPS32-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32) into %ir.ptr)
+ ; MIPS32-NEXT: RetRA
%0:_(s32) = COPY $a0
%1:_(p0) = COPY $a1
G_STORE %0(s32), %1(p0) :: (store (s32) into %ir.ptr)
@@ -40,14 +41,15 @@ body: |
; MIPS32-LABEL: name: store_i64
; MIPS32: liveins: $a0, $a1, $a2
- ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
- ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
- ; MIPS32: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store (s32) into %ir.ptr, align 8)
- ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
- ; MIPS32: [[PTR_ADD:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
- ; MIPS32: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.ptr + 4, basealign 8)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+ ; MIPS32-NEXT: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store (s32) into %ir.ptr, align 8)
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[PTR_ADD:%[0-9]+]]:gprb(p0) = nuw G_PTR_ADD [[COPY2]], [[C]](s32)
+ ; MIPS32-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.ptr + 4, basealign 8)
+ ; MIPS32-NEXT: RetRA
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1
%0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
@@ -67,10 +69,11 @@ body: |
; MIPS32-LABEL: name: store_float
; MIPS32: liveins: $a1, $f12
- ; MIPS32: [[COPY:%[0-9]+]]:fprb(s32) = COPY $f12
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
- ; MIPS32: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32) into %ir.ptr)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:fprb(s32) = COPY $f12
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+ ; MIPS32-NEXT: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store (s32) into %ir.ptr)
+ ; MIPS32-NEXT: RetRA
%0:_(s32) = COPY $f12
%1:_(p0) = COPY $a1
G_STORE %0(s32), %1(p0) :: (store (s32) into %ir.ptr)
@@ -88,10 +91,11 @@ body: |
; MIPS32-LABEL: name: store_double
; MIPS32: liveins: $a2, $d6
- ; MIPS32: [[COPY:%[0-9]+]]:fprb(s64) = COPY $d6
- ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a2
- ; MIPS32: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64) into %ir.ptr)
- ; MIPS32: RetRA
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:fprb(s64) = COPY $d6
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a2
+ ; MIPS32-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64) into %ir.ptr)
+ ; MIPS32-NEXT: RetRA
%0:_(s64) = COPY $d6
%1:_(p0) = COPY $a2
G_STORE %0(s64), %1(p0) :: (store (s64) into %ir.ptr)
diff --git a/llvm/test/CodeGen/Mips/abiflags-soft-float.ll b/llvm/test/CodeGen/Mips/abiflags-soft-float.ll
new file mode 100644
index 0000000..01821f2
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/abiflags-soft-float.ll
@@ -0,0 +1,12 @@
+; RUN: llc -filetype=obj -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o tmp.o
+; RUN: llvm-readobj -A tmp.o | FileCheck %s -check-prefix=OBJ
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | \
+; RUN: FileCheck %s -check-prefix=ASM
+
+; OBJ: FP ABI: Soft float
+; ASM: .module softfloat
+
+define dso_local void @asm_is_null() "use-soft-float"="true" {
+ call void asm sideeffect "", ""()
+ ret void
+}
diff --git a/llvm/test/CodeGen/Mips/llvm.frexp.ll b/llvm/test/CodeGen/Mips/llvm.frexp.ll
new file mode 100644
index 0000000..3226766
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/llvm.frexp.ll
@@ -0,0 +1,651 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=mipsel < %s | FileCheck -check-prefix=MIPSEL %s
+; RUN: llc -mtriple=mips < %s | FileCheck %s -check-prefixes=SOFT-FLOAT-32
+; RUN: llc -mtriple=mips64 < %s | FileCheck %s -check-prefixes=SOFT-FLOAT-64
+
+define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
+; MIPSEL-LABEL: test_frexp_f16_i32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -24
+; MIPSEL-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: jal __extendhfsf2
+; MIPSEL-NEXT: nop
+; MIPSEL-NEXT: addiu $5, $sp, 16
+; MIPSEL-NEXT: jal frexpf
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: lw $3, 16($sp)
+; MIPSEL-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 24
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_f16_i32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -24
+; SOFT-FLOAT-32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-32-NEXT: nop
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 16
+; SOFT-FLOAT-32-NEXT: jal frexpf
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: lw $3, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 24
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_f16_i32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16
+; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-64-NEXT: sll $4, $4, 0
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: jal frexpf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: lw $3, 4($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16
+ %result = call { half, i32 } @llvm.frexp.f16.i32(half %a)
+ ret { half, i32 } %result
+}
+
+define { <2 x half>, <2 x i32> } @test_frexp_v2f16_v2i32(<2 x half> %a) nounwind {
+; MIPSEL-LABEL: test_frexp_v2f16_v2i32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -32
+; MIPSEL-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $18, 24($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $17, 20($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 16($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $16, $5
+; MIPSEL-NEXT: move $17, $4
+; MIPSEL-NEXT: jal __extendhfsf2
+; MIPSEL-NEXT: srl $4, $5, 16
+; MIPSEL-NEXT: addiu $5, $17, 12
+; MIPSEL-NEXT: jal frexpf
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: move $18, $2
+; MIPSEL-NEXT: jal __extendhfsf2
+; MIPSEL-NEXT: move $4, $16
+; MIPSEL-NEXT: addiu $5, $17, 8
+; MIPSEL-NEXT: jal frexpf
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: sh $18, 2($17)
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: sh $2, 0($17)
+; MIPSEL-NEXT: lw $16, 16($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $17, 20($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $18, 24($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_v2f16_v2i32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $18, 24($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $17, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 16($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $16, $5
+; SOFT-FLOAT-32-NEXT: move $17, $4
+; SOFT-FLOAT-32-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-32-NEXT: move $4, $5
+; SOFT-FLOAT-32-NEXT: addiu $5, $17, 12
+; SOFT-FLOAT-32-NEXT: jal frexpf
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: move $18, $2
+; SOFT-FLOAT-32-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-32-NEXT: srl $4, $16, 16
+; SOFT-FLOAT-32-NEXT: addiu $5, $17, 8
+; SOFT-FLOAT-32-NEXT: jal frexpf
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: sh $18, 2($17)
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: sh $2, 0($17)
+; SOFT-FLOAT-32-NEXT: lw $16, 16($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $17, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $18, 24($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_v2f16_v2i32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -32
+; SOFT-FLOAT-64-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $18, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $17, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 0($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $16, $5
+; SOFT-FLOAT-64-NEXT: move $17, $4
+; SOFT-FLOAT-64-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-64-NEXT: sll $4, $5, 0
+; SOFT-FLOAT-64-NEXT: daddiu $5, $17, 12
+; SOFT-FLOAT-64-NEXT: jal frexpf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: move $18, $2
+; SOFT-FLOAT-64-NEXT: sll $1, $16, 0
+; SOFT-FLOAT-64-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-64-NEXT: srl $4, $1, 16
+; SOFT-FLOAT-64-NEXT: daddiu $5, $17, 8
+; SOFT-FLOAT-64-NEXT: jal frexpf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: sh $18, 2($17)
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: sh $2, 0($17)
+; SOFT-FLOAT-64-NEXT: ld $16, 0($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $17, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $18, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 32
+ %result = call { <2 x half>, <2 x i32> } @llvm.frexp.v2f16.v2i32(<2 x half> %a)
+ ret { <2 x half>, <2 x i32> } %result
+}
+
+define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
+; MIPSEL-LABEL: test_frexp_f32_i32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -24
+; MIPSEL-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: jal frexpf
+; MIPSEL-NEXT: addiu $5, $sp, 16
+; MIPSEL-NEXT: lw $2, 16($sp)
+; MIPSEL-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 24
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_f32_i32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -24
+; SOFT-FLOAT-32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: jal frexpf
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 16
+; SOFT-FLOAT-32-NEXT: lw $2, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 24
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_f32_i32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16
+; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: jal frexpf
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: lw $2, 4($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16
+ %result = call { float, i32 } @llvm.frexp.f32.i32(float %a)
+ ret { float, i32 } %result
+}
+
+define { float, i32 } @test_frexp_f32_i32_tailcall(float %a) nounwind {
+; MIPSEL-LABEL: test_frexp_f32_i32_tailcall:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -24
+; MIPSEL-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: jal frexpf
+; MIPSEL-NEXT: addiu $5, $sp, 16
+; MIPSEL-NEXT: lw $2, 16($sp)
+; MIPSEL-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 24
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_f32_i32_tailcall:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -24
+; SOFT-FLOAT-32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: jal frexpf
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 16
+; SOFT-FLOAT-32-NEXT: lw $2, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 24
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_f32_i32_tailcall:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16
+; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: jal frexpf
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: lw $2, 4($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16
+ %result = tail call { float, i32 } @llvm.frexp.f32.i32(float %a)
+ ret { float, i32 } %result
+}
+
+define { <2 x float>, <2 x i32> } @test_frexp_v2f32_v2i32(<2 x float> %a) nounwind {
+; MIPSEL-LABEL: test_frexp_v2f32_v2i32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -32
+; MIPSEL-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $17, 24($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 20($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $16, $6
+; MIPSEL-NEXT: move $17, $4
+; MIPSEL-NEXT: mtc1 $7, $f12
+; MIPSEL-NEXT: jal frexpf
+; MIPSEL-NEXT: addiu $5, $4, 12
+; MIPSEL-NEXT: swc1 $f0, 4($17)
+; MIPSEL-NEXT: mtc1 $16, $f12
+; MIPSEL-NEXT: jal frexpf
+; MIPSEL-NEXT: addiu $5, $17, 8
+; MIPSEL-NEXT: swc1 $f0, 0($17)
+; MIPSEL-NEXT: lw $16, 20($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $17, 24($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_v2f32_v2i32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $17, 24($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $16, $6
+; SOFT-FLOAT-32-NEXT: move $17, $4
+; SOFT-FLOAT-32-NEXT: mtc1 $7, $f12
+; SOFT-FLOAT-32-NEXT: jal frexpf
+; SOFT-FLOAT-32-NEXT: addiu $5, $4, 12
+; SOFT-FLOAT-32-NEXT: swc1 $f0, 4($17)
+; SOFT-FLOAT-32-NEXT: mtc1 $16, $f12
+; SOFT-FLOAT-32-NEXT: jal frexpf
+; SOFT-FLOAT-32-NEXT: addiu $5, $17, 8
+; SOFT-FLOAT-32-NEXT: swc1 $f0, 0($17)
+; SOFT-FLOAT-32-NEXT: lw $16, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $17, 24($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_v2f32_v2i32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -32
+; SOFT-FLOAT-64-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $16, $4
+; SOFT-FLOAT-64-NEXT: sll $1, $16, 0
+; SOFT-FLOAT-64-NEXT: mtc1 $1, $f12
+; SOFT-FLOAT-64-NEXT: jal frexpf
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 8
+; SOFT-FLOAT-64-NEXT: mfc1 $1, $f0
+; SOFT-FLOAT-64-NEXT: dsll $1, $1, 32
+; SOFT-FLOAT-64-NEXT: dsrl $2, $16, 32
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 12
+; SOFT-FLOAT-64-NEXT: dsrl $16, $1, 32
+; SOFT-FLOAT-64-NEXT: sll $1, $2, 0
+; SOFT-FLOAT-64-NEXT: jal frexpf
+; SOFT-FLOAT-64-NEXT: mtc1 $1, $f12
+; SOFT-FLOAT-64-NEXT: mfc1 $1, $f0
+; SOFT-FLOAT-64-NEXT: dsll $1, $1, 32
+; SOFT-FLOAT-64-NEXT: or $2, $16, $1
+; SOFT-FLOAT-64-NEXT: lw $1, 12($sp)
+; SOFT-FLOAT-64-NEXT: dsll $1, $1, 32
+; SOFT-FLOAT-64-NEXT: lw $3, 8($sp)
+; SOFT-FLOAT-64-NEXT: dsll $3, $3, 32
+; SOFT-FLOAT-64-NEXT: dsrl $3, $3, 32
+; SOFT-FLOAT-64-NEXT: or $3, $3, $1
+; SOFT-FLOAT-64-NEXT: ld $16, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 32
+ %result = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> %a)
+ ret { <2 x float>, <2 x i32> } %result
+}
+
+define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
+; MIPSEL-LABEL: test_frexp_f64_i32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -24
+; MIPSEL-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: jal frexp
+; MIPSEL-NEXT: addiu $6, $sp, 16
+; MIPSEL-NEXT: lw $2, 16($sp)
+; MIPSEL-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 24
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_f64_i32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -24
+; SOFT-FLOAT-32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: jal frexp
+; SOFT-FLOAT-32-NEXT: addiu $6, $sp, 16
+; SOFT-FLOAT-32-NEXT: lw $2, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 24
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_f64_i32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16
+; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: jal frexp
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: lw $2, 4($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16
+ %result = call { double, i32 } @llvm.frexp.f64.i32(double %a)
+ ret { double, i32 } %result
+}
+
+define { <2 x double>, <2 x i32> } @test_frexp_v2f64_v2i32(<2 x double> %a) nounwind {
+; MIPSEL-LABEL: test_frexp_v2f64_v2i32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -48
+; MIPSEL-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $18, 40($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $17, 36($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $16, $7
+; MIPSEL-NEXT: move $17, $6
+; MIPSEL-NEXT: move $18, $4
+; MIPSEL-NEXT: lw $1, 64($sp)
+; MIPSEL-NEXT: lw $2, 68($sp)
+; MIPSEL-NEXT: sw $2, 28($sp)
+; MIPSEL-NEXT: sw $1, 24($sp)
+; MIPSEL-NEXT: addiu $6, $4, 20
+; MIPSEL-NEXT: jal frexp
+; MIPSEL-NEXT: ldc1 $f12, 24($sp)
+; MIPSEL-NEXT: sdc1 $f0, 8($18)
+; MIPSEL-NEXT: sw $16, 20($sp)
+; MIPSEL-NEXT: sw $17, 16($sp)
+; MIPSEL-NEXT: addiu $6, $18, 16
+; MIPSEL-NEXT: jal frexp
+; MIPSEL-NEXT: ldc1 $f12, 16($sp)
+; MIPSEL-NEXT: sdc1 $f0, 0($18)
+; MIPSEL-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $17, 36($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $18, 40($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 48
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_v2f64_v2i32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -48
+; SOFT-FLOAT-32-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $18, 40($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $17, 36($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $16, $7
+; SOFT-FLOAT-32-NEXT: move $17, $6
+; SOFT-FLOAT-32-NEXT: move $18, $4
+; SOFT-FLOAT-32-NEXT: lw $1, 64($sp)
+; SOFT-FLOAT-32-NEXT: lw $2, 68($sp)
+; SOFT-FLOAT-32-NEXT: sw $2, 28($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 24($sp)
+; SOFT-FLOAT-32-NEXT: addiu $6, $4, 20
+; SOFT-FLOAT-32-NEXT: jal frexp
+; SOFT-FLOAT-32-NEXT: ldc1 $f12, 24($sp)
+; SOFT-FLOAT-32-NEXT: sdc1 $f0, 8($18)
+; SOFT-FLOAT-32-NEXT: sw $16, 20($sp)
+; SOFT-FLOAT-32-NEXT: sw $17, 16($sp)
+; SOFT-FLOAT-32-NEXT: addiu $6, $18, 16
+; SOFT-FLOAT-32-NEXT: jal frexp
+; SOFT-FLOAT-32-NEXT: ldc1 $f12, 16($sp)
+; SOFT-FLOAT-32-NEXT: sdc1 $f0, 0($18)
+; SOFT-FLOAT-32-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $17, 36($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $18, 40($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 48
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_v2f64_v2i32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -32
+; SOFT-FLOAT-64-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $17, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $16, $5
+; SOFT-FLOAT-64-NEXT: move $17, $4
+; SOFT-FLOAT-64-NEXT: dmtc1 $6, $f12
+; SOFT-FLOAT-64-NEXT: jal frexp
+; SOFT-FLOAT-64-NEXT: daddiu $5, $4, 20
+; SOFT-FLOAT-64-NEXT: sdc1 $f0, 8($17)
+; SOFT-FLOAT-64-NEXT: dmtc1 $16, $f12
+; SOFT-FLOAT-64-NEXT: jal frexp
+; SOFT-FLOAT-64-NEXT: daddiu $5, $17, 16
+; SOFT-FLOAT-64-NEXT: sdc1 $f0, 0($17)
+; SOFT-FLOAT-64-NEXT: ld $16, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $17, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 32
+ %result = call { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double> %a)
+ ret { <2 x double>, <2 x i32> } %result
+}
+
+define { fp128, i32 } @test_frexp_fp128_i32(fp128 %a) nounwind {
+; MIPSEL-LABEL: test_frexp_fp128_i32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -40
+; MIPSEL-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $1, $7
+; MIPSEL-NEXT: move $16, $4
+; MIPSEL-NEXT: addiu $2, $sp, 28
+; MIPSEL-NEXT: sw $2, 16($sp)
+; MIPSEL-NEXT: lw $7, 56($sp)
+; MIPSEL-NEXT: move $4, $5
+; MIPSEL-NEXT: move $5, $6
+; MIPSEL-NEXT: jal frexpl
+; MIPSEL-NEXT: move $6, $1
+; MIPSEL-NEXT: sw $5, 12($16)
+; MIPSEL-NEXT: sw $4, 8($16)
+; MIPSEL-NEXT: sw $3, 4($16)
+; MIPSEL-NEXT: sw $2, 0($16)
+; MIPSEL-NEXT: lw $1, 28($sp)
+; MIPSEL-NEXT: sw $1, 16($16)
+; MIPSEL-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 40
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_fp128_i32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -40
+; SOFT-FLOAT-32-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $1, $7
+; SOFT-FLOAT-32-NEXT: move $16, $4
+; SOFT-FLOAT-32-NEXT: addiu $2, $sp, 28
+; SOFT-FLOAT-32-NEXT: sw $2, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $7, 56($sp)
+; SOFT-FLOAT-32-NEXT: move $4, $5
+; SOFT-FLOAT-32-NEXT: move $5, $6
+; SOFT-FLOAT-32-NEXT: jal frexpl
+; SOFT-FLOAT-32-NEXT: move $6, $1
+; SOFT-FLOAT-32-NEXT: sw $5, 12($16)
+; SOFT-FLOAT-32-NEXT: sw $4, 8($16)
+; SOFT-FLOAT-32-NEXT: sw $3, 4($16)
+; SOFT-FLOAT-32-NEXT: sw $2, 0($16)
+; SOFT-FLOAT-32-NEXT: lw $1, 28($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 16($16)
+; SOFT-FLOAT-32-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 40
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_fp128_i32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16
+; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: dmfc1 $4, $f12
+; SOFT-FLOAT-64-NEXT: dmfc1 $5, $f13
+; SOFT-FLOAT-64-NEXT: jal frexpl
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 4
+; SOFT-FLOAT-64-NEXT: lw $4, 4($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16
+ %result = call { fp128, i32 } @llvm.frexp.fp128.i32(fp128 %a)
+ ret { fp128, i32 } %result
+}
+
+define { <2 x fp128>, <2 x i32> } @test_frexp_v2fp128_v2i32(<2 x fp128> %a) nounwind {
+; MIPSEL-LABEL: test_frexp_v2fp128_v2i32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -48
+; MIPSEL-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $18, 40($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $17, 36($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $16, $7
+; MIPSEL-NEXT: move $17, $6
+; MIPSEL-NEXT: move $18, $4
+; MIPSEL-NEXT: addiu $1, $sp, 28
+; MIPSEL-NEXT: sw $1, 16($sp)
+; MIPSEL-NEXT: lw $4, 72($sp)
+; MIPSEL-NEXT: lw $5, 76($sp)
+; MIPSEL-NEXT: lw $6, 80($sp)
+; MIPSEL-NEXT: lw $7, 84($sp)
+; MIPSEL-NEXT: jal frexpl
+; MIPSEL-NEXT: nop
+; MIPSEL-NEXT: addiu $1, $sp, 24
+; MIPSEL-NEXT: sw $1, 16($sp)
+; MIPSEL-NEXT: lw $7, 68($sp)
+; MIPSEL-NEXT: lw $6, 64($sp)
+; MIPSEL-NEXT: sw $5, 28($18)
+; MIPSEL-NEXT: sw $4, 24($18)
+; MIPSEL-NEXT: sw $3, 20($18)
+; MIPSEL-NEXT: sw $2, 16($18)
+; MIPSEL-NEXT: move $4, $17
+; MIPSEL-NEXT: jal frexpl
+; MIPSEL-NEXT: move $5, $16
+; MIPSEL-NEXT: sw $5, 12($18)
+; MIPSEL-NEXT: sw $4, 8($18)
+; MIPSEL-NEXT: sw $3, 4($18)
+; MIPSEL-NEXT: sw $2, 0($18)
+; MIPSEL-NEXT: lw $1, 28($sp)
+; MIPSEL-NEXT: sw $1, 36($18)
+; MIPSEL-NEXT: lw $1, 24($sp)
+; MIPSEL-NEXT: sw $1, 32($18)
+; MIPSEL-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $17, 36($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $18, 40($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 48
+;
+; SOFT-FLOAT-32-LABEL: test_frexp_v2fp128_v2i32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -48
+; SOFT-FLOAT-32-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $18, 40($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $17, 36($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $16, $7
+; SOFT-FLOAT-32-NEXT: move $17, $6
+; SOFT-FLOAT-32-NEXT: move $18, $4
+; SOFT-FLOAT-32-NEXT: addiu $1, $sp, 28
+; SOFT-FLOAT-32-NEXT: sw $1, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $4, 72($sp)
+; SOFT-FLOAT-32-NEXT: lw $5, 76($sp)
+; SOFT-FLOAT-32-NEXT: lw $6, 80($sp)
+; SOFT-FLOAT-32-NEXT: lw $7, 84($sp)
+; SOFT-FLOAT-32-NEXT: jal frexpl
+; SOFT-FLOAT-32-NEXT: nop
+; SOFT-FLOAT-32-NEXT: addiu $1, $sp, 24
+; SOFT-FLOAT-32-NEXT: sw $1, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $7, 68($sp)
+; SOFT-FLOAT-32-NEXT: lw $6, 64($sp)
+; SOFT-FLOAT-32-NEXT: sw $5, 28($18)
+; SOFT-FLOAT-32-NEXT: sw $4, 24($18)
+; SOFT-FLOAT-32-NEXT: sw $3, 20($18)
+; SOFT-FLOAT-32-NEXT: sw $2, 16($18)
+; SOFT-FLOAT-32-NEXT: move $4, $17
+; SOFT-FLOAT-32-NEXT: jal frexpl
+; SOFT-FLOAT-32-NEXT: move $5, $16
+; SOFT-FLOAT-32-NEXT: sw $5, 12($18)
+; SOFT-FLOAT-32-NEXT: sw $4, 8($18)
+; SOFT-FLOAT-32-NEXT: sw $3, 4($18)
+; SOFT-FLOAT-32-NEXT: sw $2, 0($18)
+; SOFT-FLOAT-32-NEXT: lw $1, 28($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 36($18)
+; SOFT-FLOAT-32-NEXT: lw $1, 24($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 32($18)
+; SOFT-FLOAT-32-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $17, 36($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $18, 40($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 48
+;
+; SOFT-FLOAT-64-LABEL: test_frexp_v2fp128_v2i32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -64
+; SOFT-FLOAT-64-NEXT: sd $ra, 56($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $20, 48($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $19, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $18, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $17, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $16, $6
+; SOFT-FLOAT-64-NEXT: move $17, $5
+; SOFT-FLOAT-64-NEXT: move $18, $4
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 12
+; SOFT-FLOAT-64-NEXT: move $4, $7
+; SOFT-FLOAT-64-NEXT: jal frexpl
+; SOFT-FLOAT-64-NEXT: move $5, $8
+; SOFT-FLOAT-64-NEXT: move $19, $2
+; SOFT-FLOAT-64-NEXT: move $20, $3
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 8
+; SOFT-FLOAT-64-NEXT: lw $1, 12($sp)
+; SOFT-FLOAT-64-NEXT: sw $1, 36($18)
+; SOFT-FLOAT-64-NEXT: move $4, $17
+; SOFT-FLOAT-64-NEXT: jal frexpl
+; SOFT-FLOAT-64-NEXT: move $5, $16
+; SOFT-FLOAT-64-NEXT: lw $1, 8($sp)
+; SOFT-FLOAT-64-NEXT: sw $1, 32($18)
+; SOFT-FLOAT-64-NEXT: sd $20, 24($18)
+; SOFT-FLOAT-64-NEXT: sd $19, 16($18)
+; SOFT-FLOAT-64-NEXT: sd $3, 8($18)
+; SOFT-FLOAT-64-NEXT: sd $2, 0($18)
+; SOFT-FLOAT-64-NEXT: ld $16, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $17, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $18, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $19, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $20, 48($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 64
+ %result = call { <2 x fp128>, <2 x i32> } @llvm.frexp.v2fp128.v2i32(<2 x fp128> %a)
+ ret { <2 x fp128>, <2 x i32> } %result
+}
+
+declare { half, i32 } @llvm.frexp.f16.i32(half) #0
+declare { <2 x half>, <2 x i32> } @llvm.frexp.v2f16.v2i32(<2 x half>) #0
+
+declare { float, i32 } @llvm.frexp.f32.i32(float) #0
+declare { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float>) #0
+
+declare { double, i32 } @llvm.frexp.f64.i32(double) #0
+declare { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double>) #0
+
+declare { fp128, i32 } @llvm.frexp.fp128.i32(fp128) #0
+declare { <2 x fp128>, <2 x i32> } @llvm.frexp.v2fp128.v2i32(<2 x fp128>) #0
+
+attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
diff --git a/llvm/test/CodeGen/Mips/llvm.sincos.ll b/llvm/test/CodeGen/Mips/llvm.sincos.ll
new file mode 100644
index 0000000..046be12
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/llvm.sincos.ll
@@ -0,0 +1,1044 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=mipsel < %s | FileCheck -check-prefix=MIPSEL %s
+; RUN: llc -mtriple=mips < %s | FileCheck %s -check-prefixes=SOFT-FLOAT-32
+; RUN: llc -mtriple=mips64 < %s | FileCheck %s -check-prefixes=SOFT-FLOAT-64
+
+define { half, half } @test_sincos_f16(half %a) #0 {
+; MIPSEL-LABEL: test_sincos_f16:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -32
+; MIPSEL-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 24($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: jal __extendhfsf2
+; MIPSEL-NEXT: nop
+; MIPSEL-NEXT: addiu $5, $sp, 20
+; MIPSEL-NEXT: addiu $6, $sp, 16
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: lwc1 $f12, 20($sp)
+; MIPSEL-NEXT: move $16, $2
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: lwc1 $f12, 16($sp)
+; MIPSEL-NEXT: move $3, $2
+; MIPSEL-NEXT: move $2, $16
+; MIPSEL-NEXT: lw $16, 24($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_f16:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 24($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-32-NEXT: nop
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 20
+; SOFT-FLOAT-32-NEXT: addiu $6, $sp, 16
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: lwc1 $f12, 20($sp)
+; SOFT-FLOAT-32-NEXT: move $16, $2
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: lwc1 $f12, 16($sp)
+; SOFT-FLOAT-32-NEXT: move $3, $2
+; SOFT-FLOAT-32-NEXT: move $2, $16
+; SOFT-FLOAT-32-NEXT: lw $16, 24($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_f16:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -32
+; SOFT-FLOAT-64-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-64-NEXT: sll $4, $4, 0
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 12
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 8
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: lwc1 $f12, 12($sp)
+; SOFT-FLOAT-64-NEXT: move $16, $2
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: lwc1 $f12, 8($sp)
+; SOFT-FLOAT-64-NEXT: move $3, $2
+; SOFT-FLOAT-64-NEXT: move $2, $16
+; SOFT-FLOAT-64-NEXT: ld $16, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 32
+ %result = call { half, half } @llvm.sincos.f16(half %a)
+ ret { half, half } %result
+}
+
+define half @test_sincos_f16_only_use_sin(half %a) #0 {
+; MIPSEL-LABEL: test_sincos_f16_only_use_sin:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -32
+; MIPSEL-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: jal __extendhfsf2
+; MIPSEL-NEXT: nop
+; MIPSEL-NEXT: addiu $5, $sp, 24
+; MIPSEL-NEXT: addiu $6, $sp, 20
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: lwc1 $f12, 24($sp)
+; MIPSEL-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_f16_only_use_sin:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-32-NEXT: nop
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 24
+; SOFT-FLOAT-32-NEXT: addiu $6, $sp, 20
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: lwc1 $f12, 24($sp)
+; SOFT-FLOAT-32-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_f16_only_use_sin:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16
+; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-64-NEXT: sll $4, $4, 0
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 0
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: lwc1 $f12, 4($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16
+ %result = call { half, half } @llvm.sincos.f16(half %a)
+ %result.0 = extractvalue { half, half } %result, 0
+ ret half %result.0
+}
+
+define half @test_sincos_f16_only_use_cos(half %a) #0 {
+; MIPSEL-LABEL: test_sincos_f16_only_use_cos:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -32
+; MIPSEL-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: jal __extendhfsf2
+; MIPSEL-NEXT: nop
+; MIPSEL-NEXT: addiu $5, $sp, 24
+; MIPSEL-NEXT: addiu $6, $sp, 20
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: lwc1 $f12, 20($sp)
+; MIPSEL-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_f16_only_use_cos:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-32-NEXT: nop
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 24
+; SOFT-FLOAT-32-NEXT: addiu $6, $sp, 20
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: lwc1 $f12, 20($sp)
+; SOFT-FLOAT-32-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_f16_only_use_cos:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16
+; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-64-NEXT: sll $4, $4, 0
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 0
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: lwc1 $f12, 0($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16
+ %result = call { half, half } @llvm.sincos.f16(half %a)
+ %result.1 = extractvalue { half, half } %result, 1
+ ret half %result.1
+}
+
+define { <2 x half>, <2 x half> } @test_sincos_v2f16(<2 x half> %a) #0 {
+; MIPSEL-LABEL: test_sincos_v2f16:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -48
+; MIPSEL-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $17, 40($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 36($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $16, $5
+; MIPSEL-NEXT: move $17, $4
+; MIPSEL-NEXT: jal __extendhfsf2
+; MIPSEL-NEXT: move $4, $5
+; MIPSEL-NEXT: addiu $5, $sp, 24
+; MIPSEL-NEXT: addiu $6, $sp, 20
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: jal __extendhfsf2
+; MIPSEL-NEXT: srl $4, $16, 16
+; MIPSEL-NEXT: addiu $5, $sp, 32
+; MIPSEL-NEXT: addiu $6, $sp, 28
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: mov.s $f12, $f0
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: lwc1 $f12, 20($sp)
+; MIPSEL-NEXT: lwc1 $f12, 24($sp)
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: sh $2, 4($17)
+; MIPSEL-NEXT: sh $2, 0($17)
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: lwc1 $f12, 28($sp)
+; MIPSEL-NEXT: sh $2, 6($17)
+; MIPSEL-NEXT: jal __truncsfhf2
+; MIPSEL-NEXT: lwc1 $f12, 32($sp)
+; MIPSEL-NEXT: sh $2, 2($17)
+; MIPSEL-NEXT: lw $16, 36($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $17, 40($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 48
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_v2f16:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -48
+; SOFT-FLOAT-32-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $17, 40($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 36($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $16, $5
+; SOFT-FLOAT-32-NEXT: move $17, $4
+; SOFT-FLOAT-32-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-32-NEXT: move $4, $5
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 24
+; SOFT-FLOAT-32-NEXT: addiu $6, $sp, 20
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-32-NEXT: srl $4, $16, 16
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 32
+; SOFT-FLOAT-32-NEXT: addiu $6, $sp, 28
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: lwc1 $f12, 20($sp)
+; SOFT-FLOAT-32-NEXT: lwc1 $f12, 24($sp)
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: sh $2, 6($17)
+; SOFT-FLOAT-32-NEXT: sh $2, 2($17)
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: lwc1 $f12, 28($sp)
+; SOFT-FLOAT-32-NEXT: sh $2, 4($17)
+; SOFT-FLOAT-32-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-32-NEXT: lwc1 $f12, 32($sp)
+; SOFT-FLOAT-32-NEXT: sh $2, 0($17)
+; SOFT-FLOAT-32-NEXT: lw $16, 36($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $17, 40($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 48
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_v2f16:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -48
+; SOFT-FLOAT-64-NEXT: sd $ra, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $17, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $17, $5
+; SOFT-FLOAT-64-NEXT: move $16, $4
+; SOFT-FLOAT-64-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-64-NEXT: sll $4, $5, 0
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 12
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 8
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: sll $1, $17, 0
+; SOFT-FLOAT-64-NEXT: jal __extendhfsf2
+; SOFT-FLOAT-64-NEXT: srl $4, $1, 16
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 20
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 16
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f0
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: lwc1 $f12, 8($sp)
+; SOFT-FLOAT-64-NEXT: lwc1 $f12, 12($sp)
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: sh $2, 6($16)
+; SOFT-FLOAT-64-NEXT: sh $2, 2($16)
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: lwc1 $f12, 16($sp)
+; SOFT-FLOAT-64-NEXT: sh $2, 4($16)
+; SOFT-FLOAT-64-NEXT: jal __truncsfhf2
+; SOFT-FLOAT-64-NEXT: lwc1 $f12, 20($sp)
+; SOFT-FLOAT-64-NEXT: sh $2, 0($16)
+; SOFT-FLOAT-64-NEXT: ld $16, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $17, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 48
+ %result = call { <2 x half>, <2 x half> } @llvm.sincos.v2f16(<2 x half> %a)
+ ret { <2 x half>, <2 x half> } %result
+}
+
+define { float, float } @test_sincos_f32(float %a) #0 {
+; MIPSEL-LABEL: test_sincos_f32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -32
+; MIPSEL-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: addiu $5, $sp, 24
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: addiu $6, $sp, 20
+; MIPSEL-NEXT: lwc1 $f0, 24($sp)
+; MIPSEL-NEXT: lwc1 $f2, 20($sp)
+; MIPSEL-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_f32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: addiu $5, $sp, 24
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: addiu $6, $sp, 20
+; SOFT-FLOAT-32-NEXT: lwc1 $f0, 24($sp)
+; SOFT-FLOAT-32-NEXT: lwc1 $f2, 20($sp)
+; SOFT-FLOAT-32-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_f32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16
+; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 0
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 4($sp)
+; SOFT-FLOAT-64-NEXT: lwc1 $f2, 0($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16
+ %result = call { float, float } @llvm.sincos.f32(float %a)
+ ret { float, float } %result
+}
+
+define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 {
+; MIPSEL-LABEL: test_sincos_v2f32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -32
+; MIPSEL-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $17, 24($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 20($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $16, $6
+; MIPSEL-NEXT: move $17, $4
+; MIPSEL-NEXT: mtc1 $7, $f12
+; MIPSEL-NEXT: addiu $5, $4, 4
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: addiu $6, $4, 12
+; MIPSEL-NEXT: mtc1 $16, $f12
+; MIPSEL-NEXT: addiu $6, $17, 8
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: move $5, $17
+; MIPSEL-NEXT: lw $16, 20($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $17, 24($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_v2f32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $17, 24($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $16, $6
+; SOFT-FLOAT-32-NEXT: move $17, $4
+; SOFT-FLOAT-32-NEXT: mtc1 $7, $f12
+; SOFT-FLOAT-32-NEXT: addiu $5, $4, 4
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: addiu $6, $4, 12
+; SOFT-FLOAT-32-NEXT: mtc1 $16, $f12
+; SOFT-FLOAT-32-NEXT: addiu $6, $17, 8
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: move $5, $17
+; SOFT-FLOAT-32-NEXT: lw $16, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $17, 24($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_v2f32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -32
+; SOFT-FLOAT-64-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $16, $4
+; SOFT-FLOAT-64-NEXT: dsrl $1, $4, 32
+; SOFT-FLOAT-64-NEXT: sll $1, $1, 0
+; SOFT-FLOAT-64-NEXT: mtc1 $1, $f12
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 12
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 8
+; SOFT-FLOAT-64-NEXT: sll $1, $16, 0
+; SOFT-FLOAT-64-NEXT: mtc1 $1, $f12
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 0
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 12($sp)
+; SOFT-FLOAT-64-NEXT: mfc1 $1, $f0
+; SOFT-FLOAT-64-NEXT: dsll $1, $1, 32
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 8($sp)
+; SOFT-FLOAT-64-NEXT: mfc1 $3, $f0
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 4($sp)
+; SOFT-FLOAT-64-NEXT: mfc1 $2, $f0
+; SOFT-FLOAT-64-NEXT: dsll $2, $2, 32
+; SOFT-FLOAT-64-NEXT: dsrl $2, $2, 32
+; SOFT-FLOAT-64-NEXT: or $2, $2, $1
+; SOFT-FLOAT-64-NEXT: dsll $1, $3, 32
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 0($sp)
+; SOFT-FLOAT-64-NEXT: mfc1 $3, $f0
+; SOFT-FLOAT-64-NEXT: dsll $3, $3, 32
+; SOFT-FLOAT-64-NEXT: dsrl $3, $3, 32
+; SOFT-FLOAT-64-NEXT: or $3, $3, $1
+; SOFT-FLOAT-64-NEXT: ld $16, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 32
+; SOFT-FLOAT-64R2-LABEL: test_sincos_v2f32:
+; SOFT-FLOAT-64R2: # %bb.0:
+; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, -32
+; SOFT-FLOAT-64R2-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT: sd $16, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT: move $16, $4
+; SOFT-FLOAT-64R2-NEXT: dsrl $1, $4, 32
+; SOFT-FLOAT-64R2-NEXT: sll $1, $1, 0
+; SOFT-FLOAT-64R2-NEXT: mtc1 $1, $f12
+; SOFT-FLOAT-64R2-NEXT: daddiu $5, $sp, 12
+; SOFT-FLOAT-64R2-NEXT: jal sincosf
+; SOFT-FLOAT-64R2-NEXT: daddiu $6, $sp, 8
+; SOFT-FLOAT-64R2-NEXT: sll $1, $16, 0
+; SOFT-FLOAT-64R2-NEXT: mtc1 $1, $f12
+; SOFT-FLOAT-64R2-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64R2-NEXT: jal sincosf
+; SOFT-FLOAT-64R2-NEXT: daddiu $6, $sp, 0
+; SOFT-FLOAT-64R2-NEXT: lwc1 $f0, 12($sp)
+; SOFT-FLOAT-64R2-NEXT: mfc1 $1, $f0
+; SOFT-FLOAT-64R2-NEXT: dsll $1, $1, 32
+; SOFT-FLOAT-64R2-NEXT: lwc1 $f0, 4($sp)
+; SOFT-FLOAT-64R2-NEXT: mfc1 $2, $f0
+; SOFT-FLOAT-64R2-NEXT: dext $2, $2, 0, 32
+; SOFT-FLOAT-64R2-NEXT: lwc1 $f0, 8($sp)
+; SOFT-FLOAT-64R2-NEXT: or $2, $2, $1
+; SOFT-FLOAT-64R2-NEXT: mfc1 $1, $f0
+; SOFT-FLOAT-64R2-NEXT: dsll $1, $1, 32
+; SOFT-FLOAT-64R2-NEXT: lwc1 $f0, 0($sp)
+; SOFT-FLOAT-64R2-NEXT: mfc1 $3, $f0
+; SOFT-FLOAT-64R2-NEXT: dext $3, $3, 0, 32
+; SOFT-FLOAT-64R2-NEXT: or $3, $3, $1
+; SOFT-FLOAT-64R2-NEXT: ld $16, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT: jr $ra
+; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, 32
+ %result = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> %a)
+ ret { <2 x float>, <2 x float> } %result
+}
+
+define { <3 x float>, <3 x float> } @test_sincos_v3f32(<3 x float> %a) #0 {
+; MIPSEL-LABEL: test_sincos_v3f32:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -32
+; MIPSEL-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $18, 24($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $17, 20($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 16($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $16, $6
+; MIPSEL-NEXT: move $17, $5
+; MIPSEL-NEXT: move $18, $4
+; MIPSEL-NEXT: mtc1 $7, $f12
+; MIPSEL-NEXT: addiu $5, $4, 8
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: addiu $6, $4, 24
+; MIPSEL-NEXT: mtc1 $16, $f12
+; MIPSEL-NEXT: addiu $5, $18, 4
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: addiu $6, $18, 20
+; MIPSEL-NEXT: mtc1 $17, $f12
+; MIPSEL-NEXT: addiu $6, $18, 16
+; MIPSEL-NEXT: jal sincosf
+; MIPSEL-NEXT: move $5, $18
+; MIPSEL-NEXT: lw $16, 16($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $17, 20($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $18, 24($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_v3f32:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $18, 24($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $17, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 16($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $16, $6
+; SOFT-FLOAT-32-NEXT: move $17, $5
+; SOFT-FLOAT-32-NEXT: move $18, $4
+; SOFT-FLOAT-32-NEXT: mtc1 $7, $f12
+; SOFT-FLOAT-32-NEXT: addiu $5, $4, 8
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: addiu $6, $4, 24
+; SOFT-FLOAT-32-NEXT: mtc1 $16, $f12
+; SOFT-FLOAT-32-NEXT: addiu $5, $18, 4
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: addiu $6, $18, 20
+; SOFT-FLOAT-32-NEXT: mtc1 $17, $f12
+; SOFT-FLOAT-32-NEXT: addiu $6, $18, 16
+; SOFT-FLOAT-32-NEXT: jal sincosf
+; SOFT-FLOAT-32-NEXT: move $5, $18
+; SOFT-FLOAT-32-NEXT: lw $16, 16($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $17, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $18, 24($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 32
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_v3f32:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -48
+; SOFT-FLOAT-64-NEXT: sdc1 $f25, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sdc1 $f24, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: mov.s $f24, $f15
+; SOFT-FLOAT-64-NEXT: mov.s $f25, $f14
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f13
+; SOFT-FLOAT-64-NEXT: move $16, $4
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 0
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 12
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 8
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f25
+; SOFT-FLOAT-64-NEXT: daddiu $5, $16, 8
+; SOFT-FLOAT-64-NEXT: daddiu $6, $16, 24
+; SOFT-FLOAT-64-NEXT: jal sincosf
+; SOFT-FLOAT-64-NEXT: mov.s $f12, $f24
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 0($sp)
+; SOFT-FLOAT-64-NEXT: mfc1 $1, $f0
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 4($sp)
+; SOFT-FLOAT-64-NEXT: mfc1 $2, $f0
+; SOFT-FLOAT-64-NEXT: dsll $1, $1, 32
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 8($sp)
+; SOFT-FLOAT-64-NEXT: mfc1 $3, $f0
+; SOFT-FLOAT-64-NEXT: dsll $3, $3, 32
+; SOFT-FLOAT-64-NEXT: dsrl $3, $3, 32
+; SOFT-FLOAT-64-NEXT: or $1, $3, $1
+; SOFT-FLOAT-64-NEXT: sd $1, 16($16)
+; SOFT-FLOAT-64-NEXT: dsll $1, $2, 32
+; SOFT-FLOAT-64-NEXT: lwc1 $f0, 12($sp)
+; SOFT-FLOAT-64-NEXT: mfc1 $2, $f0
+; SOFT-FLOAT-64-NEXT: dsll $2, $2, 32
+; SOFT-FLOAT-64-NEXT: dsrl $2, $2, 32
+; SOFT-FLOAT-64-NEXT: or $1, $2, $1
+; SOFT-FLOAT-64-NEXT: sd $1, 0($16)
+; SOFT-FLOAT-64-NEXT: ld $16, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ldc1 $f24, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ldc1 $f25, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 48
+; SOFT-FLOAT-64R2-LABEL: test_sincos_v3f32:
+; SOFT-FLOAT-64R2: # %bb.0:
+; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, -48
+; SOFT-FLOAT-64R2-NEXT: sdc1 $f25, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT: sdc1 $f24, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT: sd $16, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT: mov.s $f24, $f15
+; SOFT-FLOAT-64R2-NEXT: mov.s $f25, $f14
+; SOFT-FLOAT-64R2-NEXT: mov.s $f12, $f13
+; SOFT-FLOAT-64R2-NEXT: move $16, $4
+; SOFT-FLOAT-64R2-NEXT: daddiu $5, $sp, 4
+; SOFT-FLOAT-64R2-NEXT: jal sincosf
+; SOFT-FLOAT-64R2-NEXT: daddiu $6, $sp, 0
+; SOFT-FLOAT-64R2-NEXT: daddiu $5, $sp, 12
+; SOFT-FLOAT-64R2-NEXT: daddiu $6, $sp, 8
+; SOFT-FLOAT-64R2-NEXT: jal sincosf
+; SOFT-FLOAT-64R2-NEXT: mov.s $f12, $f25
+; SOFT-FLOAT-64R2-NEXT: daddiu $5, $16, 8
+; SOFT-FLOAT-64R2-NEXT: daddiu $6, $16, 24
+; SOFT-FLOAT-64R2-NEXT: jal sincosf
+; SOFT-FLOAT-64R2-NEXT: mov.s $f12, $f24
+; SOFT-FLOAT-64R2-NEXT: lwc1 $f0, 0($sp)
+; SOFT-FLOAT-64R2-NEXT: mfc1 $1, $f0
+; SOFT-FLOAT-64R2-NEXT: dsll $1, $1, 32
+; SOFT-FLOAT-64R2-NEXT: lwc1 $f0, 8($sp)
+; SOFT-FLOAT-64R2-NEXT: mfc1 $2, $f0
+; SOFT-FLOAT-64R2-NEXT: dext $2, $2, 0, 32
+; SOFT-FLOAT-64R2-NEXT: or $1, $2, $1
+; SOFT-FLOAT-64R2-NEXT: lwc1 $f0, 4($sp)
+; SOFT-FLOAT-64R2-NEXT: mfc1 $2, $f0
+; SOFT-FLOAT-64R2-NEXT: sd $1, 16($16)
+; SOFT-FLOAT-64R2-NEXT: dsll $1, $2, 32
+; SOFT-FLOAT-64R2-NEXT: lwc1 $f0, 12($sp)
+; SOFT-FLOAT-64R2-NEXT: mfc1 $2, $f0
+; SOFT-FLOAT-64R2-NEXT: dext $2, $2, 0, 32
+; SOFT-FLOAT-64R2-NEXT: or $1, $2, $1
+; SOFT-FLOAT-64R2-NEXT: sd $1, 0($16)
+; SOFT-FLOAT-64R2-NEXT: ld $16, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT: ldc1 $f24, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT: ldc1 $f25, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT: jr $ra
+; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, 48
+ %result = call { <3 x float>, <3 x float> } @llvm.sincos.v3f32(<3 x float> %a)
+ ret { <3 x float>, <3 x float> } %result
+}
+
+define { double, double } @test_sincos_f64(double %a) #0 {
+; MIPSEL-LABEL: test_sincos_f64:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -40
+; MIPSEL-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: addiu $6, $sp, 24
+; MIPSEL-NEXT: jal sincos
+; MIPSEL-NEXT: addiu $7, $sp, 16
+; MIPSEL-NEXT: ldc1 $f0, 24($sp)
+; MIPSEL-NEXT: ldc1 $f2, 16($sp)
+; MIPSEL-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 40
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_f64:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -40
+; SOFT-FLOAT-32-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: addiu $6, $sp, 24
+; SOFT-FLOAT-32-NEXT: jal sincos
+; SOFT-FLOAT-32-NEXT: addiu $7, $sp, 16
+; SOFT-FLOAT-32-NEXT: ldc1 $f0, 24($sp)
+; SOFT-FLOAT-32-NEXT: ldc1 $f2, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 40
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_f64:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -32
+; SOFT-FLOAT-64-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: daddiu $5, $sp, 16
+; SOFT-FLOAT-64-NEXT: jal sincos
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 8
+; SOFT-FLOAT-64-NEXT: ldc1 $f0, 16($sp)
+; SOFT-FLOAT-64-NEXT: ldc1 $f2, 8($sp)
+; SOFT-FLOAT-64-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 32
+ %result = call { double, double } @llvm.sincos.f64(double %a)
+ ret { double, double } %result
+}
+
+define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) #0 {
+; MIPSEL-LABEL: test_sincos_v2f64:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -40
+; MIPSEL-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $16, $4
+; MIPSEL-NEXT: lw $1, 56($sp)
+; MIPSEL-NEXT: lw $2, 60($sp)
+; MIPSEL-NEXT: sw $2, 28($sp)
+; MIPSEL-NEXT: sw $1, 24($sp)
+; MIPSEL-NEXT: sw $7, 20($sp)
+; MIPSEL-NEXT: sw $6, 16($sp)
+; MIPSEL-NEXT: addiu $6, $4, 8
+; MIPSEL-NEXT: addiu $7, $4, 24
+; MIPSEL-NEXT: jal sincos
+; MIPSEL-NEXT: ldc1 $f12, 24($sp)
+; MIPSEL-NEXT: addiu $7, $16, 16
+; MIPSEL-NEXT: ldc1 $f12, 16($sp)
+; MIPSEL-NEXT: jal sincos
+; MIPSEL-NEXT: move $6, $16
+; MIPSEL-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 40
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_v2f64:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -40
+; SOFT-FLOAT-32-NEXT: sw $ra, 36($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 32($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $16, $4
+; SOFT-FLOAT-32-NEXT: lw $1, 56($sp)
+; SOFT-FLOAT-32-NEXT: lw $2, 60($sp)
+; SOFT-FLOAT-32-NEXT: sw $2, 28($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 24($sp)
+; SOFT-FLOAT-32-NEXT: sw $7, 20($sp)
+; SOFT-FLOAT-32-NEXT: sw $6, 16($sp)
+; SOFT-FLOAT-32-NEXT: addiu $6, $4, 8
+; SOFT-FLOAT-32-NEXT: addiu $7, $4, 24
+; SOFT-FLOAT-32-NEXT: jal sincos
+; SOFT-FLOAT-32-NEXT: ldc1 $f12, 24($sp)
+; SOFT-FLOAT-32-NEXT: addiu $7, $16, 16
+; SOFT-FLOAT-32-NEXT: ldc1 $f12, 16($sp)
+; SOFT-FLOAT-32-NEXT: jal sincos
+; SOFT-FLOAT-32-NEXT: move $6, $16
+; SOFT-FLOAT-32-NEXT: lw $16, 32($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 36($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 40
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_v2f64:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -32
+; SOFT-FLOAT-64-NEXT: sd $ra, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $17, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 8($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $16, $5
+; SOFT-FLOAT-64-NEXT: move $17, $4
+; SOFT-FLOAT-64-NEXT: dmtc1 $6, $f12
+; SOFT-FLOAT-64-NEXT: daddiu $5, $4, 8
+; SOFT-FLOAT-64-NEXT: jal sincos
+; SOFT-FLOAT-64-NEXT: daddiu $6, $4, 24
+; SOFT-FLOAT-64-NEXT: dmtc1 $16, $f12
+; SOFT-FLOAT-64-NEXT: daddiu $6, $17, 16
+; SOFT-FLOAT-64-NEXT: jal sincos
+; SOFT-FLOAT-64-NEXT: move $5, $17
+; SOFT-FLOAT-64-NEXT: ld $16, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $17, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 32
+; SOFT-FLOAT-32R2-LABEL: test_sincos_v2f64:
+; SOFT-FLOAT-32R2: # %bb.0:
+; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, -32
+; SOFT-FLOAT-32R2-NEXT: sw $ra, 28($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32R2-NEXT: sw $18, 24($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32R2-NEXT: sw $17, 20($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32R2-NEXT: sw $16, 16($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32R2-NEXT: move $16, $7
+; SOFT-FLOAT-32R2-NEXT: move $17, $6
+; SOFT-FLOAT-32R2-NEXT: move $18, $4
+; SOFT-FLOAT-32R2-NEXT: lw $1, 48($sp)
+; SOFT-FLOAT-32R2-NEXT: lw $2, 52($sp)
+; SOFT-FLOAT-32R2-NEXT: mtc1 $2, $f12
+; SOFT-FLOAT-32R2-NEXT: mthc1 $1, $f12
+; SOFT-FLOAT-32R2-NEXT: addiu $6, $4, 8
+; SOFT-FLOAT-32R2-NEXT: jal sincos
+; SOFT-FLOAT-32R2-NEXT: addiu $7, $4, 24
+; SOFT-FLOAT-32R2-NEXT: mtc1 $16, $f12
+; SOFT-FLOAT-32R2-NEXT: mthc1 $17, $f12
+; SOFT-FLOAT-32R2-NEXT: addiu $7, $18, 16
+; SOFT-FLOAT-32R2-NEXT: jal sincos
+; SOFT-FLOAT-32R2-NEXT: move $6, $18
+; SOFT-FLOAT-32R2-NEXT: lw $16, 16($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32R2-NEXT: lw $17, 20($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32R2-NEXT: lw $18, 24($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32R2-NEXT: lw $ra, 28($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32R2-NEXT: jr $ra
+; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, 32
+ %result = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+define { fp128, fp128 } @test_sincos_f128(fp128 %a) #0 {
+; MIPSEL-LABEL: test_sincos_f128:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -64
+; MIPSEL-NEXT: sw $ra, 60($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 56($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $1, $7
+; MIPSEL-NEXT: move $16, $4
+; MIPSEL-NEXT: addiu $2, $sp, 24
+; MIPSEL-NEXT: sw $2, 20($sp)
+; MIPSEL-NEXT: addiu $2, $sp, 40
+; MIPSEL-NEXT: sw $2, 16($sp)
+; MIPSEL-NEXT: lw $7, 80($sp)
+; MIPSEL-NEXT: move $4, $5
+; MIPSEL-NEXT: move $5, $6
+; MIPSEL-NEXT: jal sincosl
+; MIPSEL-NEXT: move $6, $1
+; MIPSEL-NEXT: lw $1, 52($sp)
+; MIPSEL-NEXT: lw $2, 24($sp)
+; MIPSEL-NEXT: lw $3, 28($sp)
+; MIPSEL-NEXT: lw $4, 32($sp)
+; MIPSEL-NEXT: lw $5, 36($sp)
+; MIPSEL-NEXT: sw $5, 28($16)
+; MIPSEL-NEXT: sw $4, 24($16)
+; MIPSEL-NEXT: sw $3, 20($16)
+; MIPSEL-NEXT: sw $2, 16($16)
+; MIPSEL-NEXT: sw $1, 12($16)
+; MIPSEL-NEXT: lw $1, 48($sp)
+; MIPSEL-NEXT: sw $1, 8($16)
+; MIPSEL-NEXT: lw $1, 44($sp)
+; MIPSEL-NEXT: sw $1, 4($16)
+; MIPSEL-NEXT: lw $1, 40($sp)
+; MIPSEL-NEXT: sw $1, 0($16)
+; MIPSEL-NEXT: lw $16, 56($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 60($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 64
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_f128:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -64
+; SOFT-FLOAT-32-NEXT: sw $ra, 60($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 56($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $1, $7
+; SOFT-FLOAT-32-NEXT: move $16, $4
+; SOFT-FLOAT-32-NEXT: addiu $2, $sp, 24
+; SOFT-FLOAT-32-NEXT: sw $2, 20($sp)
+; SOFT-FLOAT-32-NEXT: addiu $2, $sp, 40
+; SOFT-FLOAT-32-NEXT: sw $2, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $7, 80($sp)
+; SOFT-FLOAT-32-NEXT: move $4, $5
+; SOFT-FLOAT-32-NEXT: move $5, $6
+; SOFT-FLOAT-32-NEXT: jal sincosl
+; SOFT-FLOAT-32-NEXT: move $6, $1
+; SOFT-FLOAT-32-NEXT: lw $1, 52($sp)
+; SOFT-FLOAT-32-NEXT: lw $2, 24($sp)
+; SOFT-FLOAT-32-NEXT: lw $3, 28($sp)
+; SOFT-FLOAT-32-NEXT: lw $4, 32($sp)
+; SOFT-FLOAT-32-NEXT: lw $5, 36($sp)
+; SOFT-FLOAT-32-NEXT: sw $5, 28($16)
+; SOFT-FLOAT-32-NEXT: sw $4, 24($16)
+; SOFT-FLOAT-32-NEXT: sw $3, 20($16)
+; SOFT-FLOAT-32-NEXT: sw $2, 16($16)
+; SOFT-FLOAT-32-NEXT: sw $1, 12($16)
+; SOFT-FLOAT-32-NEXT: lw $1, 48($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 8($16)
+; SOFT-FLOAT-32-NEXT: lw $1, 44($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 4($16)
+; SOFT-FLOAT-32-NEXT: lw $1, 40($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 0($16)
+; SOFT-FLOAT-32-NEXT: lw $16, 56($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 60($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 64
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_f128:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -48
+; SOFT-FLOAT-64-NEXT: sd $ra, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $16, $4
+; SOFT-FLOAT-64-NEXT: dmfc1 $4, $f13
+; SOFT-FLOAT-64-NEXT: dmfc1 $5, $f14
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 16
+; SOFT-FLOAT-64-NEXT: jal sincosl
+; SOFT-FLOAT-64-NEXT: daddiu $7, $sp, 0
+; SOFT-FLOAT-64-NEXT: ld $1, 8($sp)
+; SOFT-FLOAT-64-NEXT: sd $1, 24($16)
+; SOFT-FLOAT-64-NEXT: ld $1, 0($sp)
+; SOFT-FLOAT-64-NEXT: sd $1, 16($16)
+; SOFT-FLOAT-64-NEXT: ld $1, 24($sp)
+; SOFT-FLOAT-64-NEXT: sd $1, 8($16)
+; SOFT-FLOAT-64-NEXT: ld $1, 16($sp)
+; SOFT-FLOAT-64-NEXT: sd $1, 0($16)
+; SOFT-FLOAT-64-NEXT: ld $16, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 48
+ %result = call { fp128, fp128 } @llvm.sincos.f128(fp128 %a)
+ ret { fp128, fp128 } %result
+}
+
+define { <2 x fp128>, <2 x fp128> } @test_sincos_v2f128(<2 x fp128> %a) #0 {
+; MIPSEL-LABEL: test_sincos_v2f128:
+; MIPSEL: # %bb.0:
+; MIPSEL-NEXT: addiu $sp, $sp, -96
+; MIPSEL-NEXT: sw $ra, 92($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: sw $16, 88($sp) # 4-byte Folded Spill
+; MIPSEL-NEXT: move $5, $7
+; MIPSEL-NEXT: move $1, $6
+; MIPSEL-NEXT: move $16, $4
+; MIPSEL-NEXT: addiu $2, $sp, 24
+; MIPSEL-NEXT: sw $2, 20($sp)
+; MIPSEL-NEXT: addiu $2, $sp, 40
+; MIPSEL-NEXT: sw $2, 16($sp)
+; MIPSEL-NEXT: lw $6, 112($sp)
+; MIPSEL-NEXT: lw $7, 116($sp)
+; MIPSEL-NEXT: jal sincosl
+; MIPSEL-NEXT: move $4, $1
+; MIPSEL-NEXT: addiu $1, $sp, 56
+; MIPSEL-NEXT: sw $1, 20($sp)
+; MIPSEL-NEXT: addiu $1, $sp, 72
+; MIPSEL-NEXT: sw $1, 16($sp)
+; MIPSEL-NEXT: lw $4, 120($sp)
+; MIPSEL-NEXT: lw $5, 124($sp)
+; MIPSEL-NEXT: lw $6, 128($sp)
+; MIPSEL-NEXT: lw $7, 132($sp)
+; MIPSEL-NEXT: jal sincosl
+; MIPSEL-NEXT: nop
+; MIPSEL-NEXT: lw $1, 36($sp)
+; MIPSEL-NEXT: lw $2, 56($sp)
+; MIPSEL-NEXT: lw $3, 60($sp)
+; MIPSEL-NEXT: lw $4, 64($sp)
+; MIPSEL-NEXT: lw $5, 52($sp)
+; MIPSEL-NEXT: lw $6, 72($sp)
+; MIPSEL-NEXT: lw $7, 76($sp)
+; MIPSEL-NEXT: lw $8, 80($sp)
+; MIPSEL-NEXT: lw $9, 84($sp)
+; MIPSEL-NEXT: lw $10, 24($sp)
+; MIPSEL-NEXT: lw $11, 28($sp)
+; MIPSEL-NEXT: lw $12, 32($sp)
+; MIPSEL-NEXT: lw $13, 68($sp)
+; MIPSEL-NEXT: sw $13, 60($16)
+; MIPSEL-NEXT: sw $4, 56($16)
+; MIPSEL-NEXT: sw $3, 52($16)
+; MIPSEL-NEXT: sw $2, 48($16)
+; MIPSEL-NEXT: sw $1, 44($16)
+; MIPSEL-NEXT: sw $12, 40($16)
+; MIPSEL-NEXT: sw $11, 36($16)
+; MIPSEL-NEXT: sw $10, 32($16)
+; MIPSEL-NEXT: sw $9, 28($16)
+; MIPSEL-NEXT: sw $8, 24($16)
+; MIPSEL-NEXT: sw $7, 20($16)
+; MIPSEL-NEXT: sw $6, 16($16)
+; MIPSEL-NEXT: sw $5, 12($16)
+; MIPSEL-NEXT: lw $1, 48($sp)
+; MIPSEL-NEXT: sw $1, 8($16)
+; MIPSEL-NEXT: lw $1, 44($sp)
+; MIPSEL-NEXT: sw $1, 4($16)
+; MIPSEL-NEXT: lw $1, 40($sp)
+; MIPSEL-NEXT: sw $1, 0($16)
+; MIPSEL-NEXT: lw $16, 88($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: lw $ra, 92($sp) # 4-byte Folded Reload
+; MIPSEL-NEXT: jr $ra
+; MIPSEL-NEXT: addiu $sp, $sp, 96
+;
+; SOFT-FLOAT-32-LABEL: test_sincos_v2f128:
+; SOFT-FLOAT-32: # %bb.0:
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -96
+; SOFT-FLOAT-32-NEXT: sw $ra, 92($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: sw $16, 88($sp) # 4-byte Folded Spill
+; SOFT-FLOAT-32-NEXT: move $5, $7
+; SOFT-FLOAT-32-NEXT: move $1, $6
+; SOFT-FLOAT-32-NEXT: move $16, $4
+; SOFT-FLOAT-32-NEXT: addiu $2, $sp, 24
+; SOFT-FLOAT-32-NEXT: sw $2, 20($sp)
+; SOFT-FLOAT-32-NEXT: addiu $2, $sp, 40
+; SOFT-FLOAT-32-NEXT: sw $2, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $6, 112($sp)
+; SOFT-FLOAT-32-NEXT: lw $7, 116($sp)
+; SOFT-FLOAT-32-NEXT: jal sincosl
+; SOFT-FLOAT-32-NEXT: move $4, $1
+; SOFT-FLOAT-32-NEXT: addiu $1, $sp, 56
+; SOFT-FLOAT-32-NEXT: sw $1, 20($sp)
+; SOFT-FLOAT-32-NEXT: addiu $1, $sp, 72
+; SOFT-FLOAT-32-NEXT: sw $1, 16($sp)
+; SOFT-FLOAT-32-NEXT: lw $4, 120($sp)
+; SOFT-FLOAT-32-NEXT: lw $5, 124($sp)
+; SOFT-FLOAT-32-NEXT: lw $6, 128($sp)
+; SOFT-FLOAT-32-NEXT: lw $7, 132($sp)
+; SOFT-FLOAT-32-NEXT: jal sincosl
+; SOFT-FLOAT-32-NEXT: nop
+; SOFT-FLOAT-32-NEXT: lw $1, 36($sp)
+; SOFT-FLOAT-32-NEXT: lw $2, 56($sp)
+; SOFT-FLOAT-32-NEXT: lw $3, 60($sp)
+; SOFT-FLOAT-32-NEXT: lw $4, 64($sp)
+; SOFT-FLOAT-32-NEXT: lw $5, 52($sp)
+; SOFT-FLOAT-32-NEXT: lw $6, 72($sp)
+; SOFT-FLOAT-32-NEXT: lw $7, 76($sp)
+; SOFT-FLOAT-32-NEXT: lw $8, 80($sp)
+; SOFT-FLOAT-32-NEXT: lw $9, 84($sp)
+; SOFT-FLOAT-32-NEXT: lw $10, 24($sp)
+; SOFT-FLOAT-32-NEXT: lw $11, 28($sp)
+; SOFT-FLOAT-32-NEXT: lw $12, 32($sp)
+; SOFT-FLOAT-32-NEXT: lw $13, 68($sp)
+; SOFT-FLOAT-32-NEXT: sw $13, 60($16)
+; SOFT-FLOAT-32-NEXT: sw $4, 56($16)
+; SOFT-FLOAT-32-NEXT: sw $3, 52($16)
+; SOFT-FLOAT-32-NEXT: sw $2, 48($16)
+; SOFT-FLOAT-32-NEXT: sw $1, 44($16)
+; SOFT-FLOAT-32-NEXT: sw $12, 40($16)
+; SOFT-FLOAT-32-NEXT: sw $11, 36($16)
+; SOFT-FLOAT-32-NEXT: sw $10, 32($16)
+; SOFT-FLOAT-32-NEXT: sw $9, 28($16)
+; SOFT-FLOAT-32-NEXT: sw $8, 24($16)
+; SOFT-FLOAT-32-NEXT: sw $7, 20($16)
+; SOFT-FLOAT-32-NEXT: sw $6, 16($16)
+; SOFT-FLOAT-32-NEXT: sw $5, 12($16)
+; SOFT-FLOAT-32-NEXT: lw $1, 48($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 8($16)
+; SOFT-FLOAT-32-NEXT: lw $1, 44($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 4($16)
+; SOFT-FLOAT-32-NEXT: lw $1, 40($sp)
+; SOFT-FLOAT-32-NEXT: sw $1, 0($16)
+; SOFT-FLOAT-32-NEXT: lw $16, 88($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: lw $ra, 92($sp) # 4-byte Folded Reload
+; SOFT-FLOAT-32-NEXT: jr $ra
+; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 96
+;
+; SOFT-FLOAT-64-LABEL: test_sincos_v2f128:
+; SOFT-FLOAT-64: # %bb.0:
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -96
+; SOFT-FLOAT-64-NEXT: sd $ra, 88($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $18, 80($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $17, 72($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: sd $16, 64($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT: move $1, $7
+; SOFT-FLOAT-64-NEXT: move $16, $6
+; SOFT-FLOAT-64-NEXT: move $17, $5
+; SOFT-FLOAT-64-NEXT: move $18, $4
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 48
+; SOFT-FLOAT-64-NEXT: daddiu $7, $sp, 32
+; SOFT-FLOAT-64-NEXT: move $4, $1
+; SOFT-FLOAT-64-NEXT: jal sincosl
+; SOFT-FLOAT-64-NEXT: move $5, $8
+; SOFT-FLOAT-64-NEXT: daddiu $6, $sp, 16
+; SOFT-FLOAT-64-NEXT: daddiu $7, $sp, 0
+; SOFT-FLOAT-64-NEXT: move $4, $17
+; SOFT-FLOAT-64-NEXT: jal sincosl
+; SOFT-FLOAT-64-NEXT: move $5, $16
+; SOFT-FLOAT-64-NEXT: ld $1, 56($sp)
+; SOFT-FLOAT-64-NEXT: ld $2, 0($sp)
+; SOFT-FLOAT-64-NEXT: ld $3, 8($sp)
+; SOFT-FLOAT-64-NEXT: ld $4, 32($sp)
+; SOFT-FLOAT-64-NEXT: ld $5, 40($sp)
+; SOFT-FLOAT-64-NEXT: sd $5, 56($18)
+; SOFT-FLOAT-64-NEXT: sd $4, 48($18)
+; SOFT-FLOAT-64-NEXT: sd $3, 40($18)
+; SOFT-FLOAT-64-NEXT: sd $2, 32($18)
+; SOFT-FLOAT-64-NEXT: sd $1, 24($18)
+; SOFT-FLOAT-64-NEXT: ld $1, 48($sp)
+; SOFT-FLOAT-64-NEXT: sd $1, 16($18)
+; SOFT-FLOAT-64-NEXT: ld $1, 24($sp)
+; SOFT-FLOAT-64-NEXT: sd $1, 8($18)
+; SOFT-FLOAT-64-NEXT: ld $1, 16($sp)
+; SOFT-FLOAT-64-NEXT: sd $1, 0($18)
+; SOFT-FLOAT-64-NEXT: ld $16, 64($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $17, 72($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $18, 80($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: ld $ra, 88($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT: jr $ra
+; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 96
+ %result = call { <2 x fp128>, <2 x fp128> } @llvm.sincos.v2f128(<2 x fp128> %a)
+ ret { <2 x fp128>, <2 x fp128> } %result
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/Mips/nan_lowering.ll b/llvm/test/CodeGen/Mips/nan_lowering.ll
new file mode 100644
index 0000000..2a11278
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/nan_lowering.ll
@@ -0,0 +1,25 @@
+; RUN: llc -mtriple=mips-linux-gnu -mattr=-nan2008 < %s | FileCheck %s
+; RUN: llc -mtriple=mips-linux-gnu -mattr=+nan2008 < %s | FileCheck %s
+
+; Make sure that lowering does not corrupt NaN bit patterns,
+; regardless of which NaN encoding mode is in use.
+
+define float @test1() {
+; CHECK: .4byte 0x7fc00000
+ ret float bitcast (i32 u0x7fc00000 to float)
+}
+
+define float @test2() {
+; CHECK: .4byte 0x7fc00001
+ ret float bitcast (i32 u0x7fc00001 to float)
+}
+
+define float @test3() {
+; CHECK: .4byte 0x7f800000
+ ret float bitcast (i32 u0x7f800000 to float)
+}
+
+define float @test4() {
+; CHECK: .4byte 0x7f800001
+ ret float bitcast (i32 u0x7f800001 to float)
+}
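As a side note on the constants the new nan_lowering.ll test checks for, the small C sketch below prints the IEEE-754 single-precision encodings involved: a quiet NaN, a quiet NaN with a payload bit set, +infinity, and a signaling NaN. It only illustrates the bit patterns the test expects llc to emit verbatim as .4byte data; the program itself and the helper name bits_to_float are assumptions made for this example and are not part of the patch.

/*
 * Illustrative sketch only (not part of this patch): print the IEEE-754
 * single-precision bit patterns that nan_lowering.ll expects llc to emit
 * as raw .4byte constants. The helper name bits_to_float is made up here.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float bits_to_float(uint32_t bits) {
    float f;
    memcpy(&f, &bits, sizeof f); /* portable bit-cast */
    return f;
}

int main(void) {
    const uint32_t patterns[] = {
        UINT32_C(0x7fc00000), /* quiet NaN             (test1) */
        UINT32_C(0x7fc00001), /* quiet NaN w/ payload  (test2) */
        UINT32_C(0x7f800000), /* +infinity             (test3) */
        UINT32_C(0x7f800001), /* signaling NaN         (test4) */
    };
    for (size_t i = 0; i < sizeof patterns / sizeof patterns[0]; ++i) {
        /* Converting to double for printing may quiet a signaling NaN,
           but the stored 32-bit pattern is what the test checks. */
        printf("0x%08" PRIx32 " -> %g\n", patterns[i],
               (double)bits_to_float(patterns[i]));
    }
    return 0;
}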
diff --git a/llvm/test/CodeGen/Mips/qnan.ll b/llvm/test/CodeGen/Mips/qnan.ll
deleted file mode 100644
index e5b4aa1..0000000
--- a/llvm/test/CodeGen/Mips/qnan.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc -O3 -mcpu=mips32r2 -mtriple=mips-linux-gnu < %s -o - | FileCheck %s -check-prefixes=MIPS_Legacy
-; RUN: llc -O3 -mcpu=mips32r2 -mtriple=mips-linux-gnu -mattr=+nan2008 < %s -o - | FileCheck %s -check-prefixes=MIPS_NaN2008
-
-define dso_local float @nan(float noundef %a, float noundef %b) local_unnamed_addr #0 {
-; MIPS_Legacy: $CPI0_0:
-; MIPS_Legacy-NEXT: .4byte 0x7fa00000 # float NaN
-
-; MIPS_NaN2008: $CPI0_0:
-; MIPS_NaN2008-NEXT: .4byte 0x7fc00000 # float NaN
-
-entry:
- %0 = tail call float @llvm.minimum.f32(float %a, float %b)
- ret float %0
-}