Diffstat (limited to 'llvm/test/CodeGen/X86/regalloc-fp.ll')
-rw-r--r--  llvm/test/CodeGen/X86/regalloc-fp.ll  |  775
1 file changed, 775 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/regalloc-fp.ll b/llvm/test/CodeGen/X86/regalloc-fp.ll
new file mode 100644
index 0000000..e89e5ab1
--- /dev/null
+++ b/llvm/test/CodeGen/X86/regalloc-fp.ll
@@ -0,0 +1,775 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
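+;
+; Each function below fills stack slots with volatile stores, reloads every
+; value into a distinct GPR, passes all of them through an inline-asm "nop"
+; whose matching constraints tie each output to its input register, and
+; stores the results back. Keeping that many values live across the asm
+; forces the allocator to hand out every allocatable GPR at once, so the
+; checks show directly whether %rbp is available under each value of the
+; "frame-pointer" attribute. With "none", %rbp is freely allocatable: the
+; asm below carries 15 values and %ebp holds one of them.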
+define i32 @check_none() "frame-pointer"="none" {
+; CHECK-LABEL: check_none:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 56
+; CHECK-NEXT: .cfi_offset %rbx, -56
+; CHECK-NEXT: .cfi_offset %r12, -48
+; CHECK-NEXT: .cfi_offset %r13, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $4, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $5, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $6, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $7, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $8, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $9, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $16, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $17, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $18, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $19, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $20, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %edx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %esi
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %edi
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r8d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r9d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r10d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r11d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ebx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ebp
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r14d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r15d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r12d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r13d
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %esi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r8d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r9d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r10d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r11d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ebx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ebp, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r14d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r15d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r12d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r13d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+entry:
+ %reg0 = alloca i32, align 4
+ %reg1 = alloca i32, align 4
+ %reg2 = alloca i32, align 4
+ %reg3 = alloca i32, align 4
+ %reg4 = alloca i32, align 4
+ %reg5 = alloca i32, align 4
+ %reg6 = alloca i32, align 4
+ %reg7 = alloca i32, align 4
+ %reg8 = alloca i32, align 4
+ %reg9 = alloca i32, align 4
+ %reg10 = alloca i32, align 4
+ %reg11 = alloca i32, align 4
+ %reg12 = alloca i32, align 4
+ %reg13 = alloca i32, align 4
+ %reg14 = alloca i32, align 4
+ store volatile i32 0, ptr %reg0, align 4
+ store volatile i32 1, ptr %reg1, align 4
+ store volatile i32 2, ptr %reg2, align 4
+ store volatile i32 3, ptr %reg3, align 4
+ store volatile i32 4, ptr %reg4, align 4
+ store volatile i32 5, ptr %reg5, align 4
+ store volatile i32 6, ptr %reg6, align 4
+ store volatile i32 7, ptr %reg7, align 4
+ store volatile i32 8, ptr %reg8, align 4
+ store volatile i32 9, ptr %reg9, align 4
+ store volatile i32 16, ptr %reg10, align 4
+ store volatile i32 17, ptr %reg11, align 4
+ store volatile i32 18, ptr %reg12, align 4
+ store volatile i32 19, ptr %reg13, align 4
+ store volatile i32 20, ptr %reg14, align 4
+ %0 = load volatile i32, ptr %reg0, align 4
+ %1 = load volatile i32, ptr %reg1, align 4
+ %2 = load volatile i32, ptr %reg2, align 4
+ %3 = load volatile i32, ptr %reg3, align 4
+ %4 = load volatile i32, ptr %reg4, align 4
+ %5 = load volatile i32, ptr %reg5, align 4
+ %6 = load volatile i32, ptr %reg6, align 4
+ %7 = load volatile i32, ptr %reg7, align 4
+ %8 = load volatile i32, ptr %reg8, align 4
+ %9 = load volatile i32, ptr %reg9, align 4
+ %10 = load volatile i32, ptr %reg10, align 4
+ %11 = load volatile i32, ptr %reg11, align 4
+ %12 = load volatile i32, ptr %reg12, align 4
+ %13 = load volatile i32, ptr %reg13, align 4
+ %14 = load volatile i32, ptr %reg14, align 4
+ %15 = call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm "nop", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14)
+ %asmresult = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 0
+ %asmresult1 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 1
+ %asmresult2 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 2
+ %asmresult3 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 3
+ %asmresult4 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 4
+ %asmresult5 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 5
+ %asmresult6 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 6
+ %asmresult7 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 7
+ %asmresult8 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 8
+ %asmresult9 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 9
+ %asmresult10 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 10
+ %asmresult11 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 11
+ %asmresult12 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 12
+ %asmresult13 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 13
+ %asmresult14 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 14
+ store volatile i32 %asmresult, ptr %reg0, align 4
+ store volatile i32 %asmresult1, ptr %reg1, align 4
+ store volatile i32 %asmresult2, ptr %reg2, align 4
+ store volatile i32 %asmresult3, ptr %reg3, align 4
+ store volatile i32 %asmresult4, ptr %reg4, align 4
+ store volatile i32 %asmresult5, ptr %reg5, align 4
+ store volatile i32 %asmresult6, ptr %reg6, align 4
+ store volatile i32 %asmresult7, ptr %reg7, align 4
+ store volatile i32 %asmresult8, ptr %reg8, align 4
+ store volatile i32 %asmresult9, ptr %reg9, align 4
+ store volatile i32 %asmresult10, ptr %reg10, align 4
+ store volatile i32 %asmresult11, ptr %reg11, align 4
+ store volatile i32 %asmresult12, ptr %reg12, align 4
+ store volatile i32 %asmresult13, ptr %reg13, align 4
+ store volatile i32 %asmresult14, ptr %reg14, align 4
+ ret i32 0
+}
+
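+; "frame-pointer"="non-leaf-no-reserve": no frame pointer is set up here and
+; %rbp stays allocatable, so the asm again carries 15 values with %ebp
+; holding one of them, matching the "none" case above.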
+define i32 @test_non_leaf_no_reserve() "frame-pointer"="non-leaf-no-reserve" {
+; CHECK-LABEL: test_non_leaf_no_reserve:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 56
+; CHECK-NEXT: .cfi_offset %rbx, -56
+; CHECK-NEXT: .cfi_offset %r12, -48
+; CHECK-NEXT: .cfi_offset %r13, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $4, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $5, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $6, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $7, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $8, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $9, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $16, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $17, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $18, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $19, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $20, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %edx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %esi
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %edi
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r8d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r9d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r10d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r11d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ebx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ebp
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r14d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r15d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r12d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r13d
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %esi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r8d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r9d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r10d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r11d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ebx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ebp, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r14d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r15d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r12d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r13d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+entry:
+ %reg0 = alloca i32, align 4
+ %reg1 = alloca i32, align 4
+ %reg2 = alloca i32, align 4
+ %reg3 = alloca i32, align 4
+ %reg4 = alloca i32, align 4
+ %reg5 = alloca i32, align 4
+ %reg6 = alloca i32, align 4
+ %reg7 = alloca i32, align 4
+ %reg8 = alloca i32, align 4
+ %reg9 = alloca i32, align 4
+ %reg10 = alloca i32, align 4
+ %reg11 = alloca i32, align 4
+ %reg12 = alloca i32, align 4
+ %reg13 = alloca i32, align 4
+ %reg14 = alloca i32, align 4
+ store volatile i32 0, ptr %reg0, align 4
+ store volatile i32 1, ptr %reg1, align 4
+ store volatile i32 2, ptr %reg2, align 4
+ store volatile i32 3, ptr %reg3, align 4
+ store volatile i32 4, ptr %reg4, align 4
+ store volatile i32 5, ptr %reg5, align 4
+ store volatile i32 6, ptr %reg6, align 4
+ store volatile i32 7, ptr %reg7, align 4
+ store volatile i32 8, ptr %reg8, align 4
+ store volatile i32 9, ptr %reg9, align 4
+ store volatile i32 16, ptr %reg10, align 4
+ store volatile i32 17, ptr %reg11, align 4
+ store volatile i32 18, ptr %reg12, align 4
+ store volatile i32 19, ptr %reg13, align 4
+ store volatile i32 20, ptr %reg14, align 4
+ %0 = load volatile i32, ptr %reg0, align 4
+ %1 = load volatile i32, ptr %reg1, align 4
+ %2 = load volatile i32, ptr %reg2, align 4
+ %3 = load volatile i32, ptr %reg3, align 4
+ %4 = load volatile i32, ptr %reg4, align 4
+ %5 = load volatile i32, ptr %reg5, align 4
+ %6 = load volatile i32, ptr %reg6, align 4
+ %7 = load volatile i32, ptr %reg7, align 4
+ %8 = load volatile i32, ptr %reg8, align 4
+ %9 = load volatile i32, ptr %reg9, align 4
+ %10 = load volatile i32, ptr %reg10, align 4
+ %11 = load volatile i32, ptr %reg11, align 4
+ %12 = load volatile i32, ptr %reg12, align 4
+ %13 = load volatile i32, ptr %reg13, align 4
+ %14 = load volatile i32, ptr %reg14, align 4
+ %15 = call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm "nop", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14)
+ %asmresult = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 0
+ %asmresult1 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 1
+ %asmresult2 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 2
+ %asmresult3 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 3
+ %asmresult4 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 4
+ %asmresult5 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 5
+ %asmresult6 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 6
+ %asmresult7 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 7
+ %asmresult8 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 8
+ %asmresult9 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 9
+ %asmresult10 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 10
+ %asmresult11 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 11
+ %asmresult12 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 12
+ %asmresult13 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 13
+ %asmresult14 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %15, 14
+ store volatile i32 %asmresult, ptr %reg0, align 4
+ store volatile i32 %asmresult1, ptr %reg1, align 4
+ store volatile i32 %asmresult2, ptr %reg2, align 4
+ store volatile i32 %asmresult3, ptr %reg3, align 4
+ store volatile i32 %asmresult4, ptr %reg4, align 4
+ store volatile i32 %asmresult5, ptr %reg5, align 4
+ store volatile i32 %asmresult6, ptr %reg6, align 4
+ store volatile i32 %asmresult7, ptr %reg7, align 4
+ store volatile i32 %asmresult8, ptr %reg8, align 4
+ store volatile i32 %asmresult9, ptr %reg9, align 4
+ store volatile i32 %asmresult10, ptr %reg10, align 4
+ store volatile i32 %asmresult11, ptr %reg11, align 4
+ store volatile i32 %asmresult12, ptr %reg12, align 4
+ store volatile i32 %asmresult13, ptr %reg13, align 4
+ store volatile i32 %asmresult14, ptr %reg14, align 4
+ ret i32 0
+}
+
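+; "frame-pointer"="non-leaf": this leaf function gets no frame-pointer setup
+; either, but %rbp is still kept out of the allocation pool; the asm carries
+; only 14 values and %ebp never appears.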
+define i32 @test_non_leaf() "frame-pointer"="non-leaf" {
+; CHECK-LABEL: test_non_leaf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset %rbx, -48
+; CHECK-NEXT: .cfi_offset %r12, -40
+; CHECK-NEXT: .cfi_offset %r13, -32
+; CHECK-NEXT: .cfi_offset %r14, -24
+; CHECK-NEXT: .cfi_offset %r15, -16
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $4, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $5, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $6, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $7, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $8, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $9, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $16, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $17, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $18, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $19, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %edx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %esi
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %edi
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r8d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r9d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r10d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r11d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ebx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r14d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r15d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r12d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r13d
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %esi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r8d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r9d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r10d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r11d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ebx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r14d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r15d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r12d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r13d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+entry:
+ %reg0 = alloca i32, align 4
+ %reg1 = alloca i32, align 4
+ %reg2 = alloca i32, align 4
+ %reg3 = alloca i32, align 4
+ %reg4 = alloca i32, align 4
+ %reg5 = alloca i32, align 4
+ %reg6 = alloca i32, align 4
+ %reg7 = alloca i32, align 4
+ %reg8 = alloca i32, align 4
+ %reg9 = alloca i32, align 4
+ %reg10 = alloca i32, align 4
+ %reg11 = alloca i32, align 4
+ %reg12 = alloca i32, align 4
+ %reg13 = alloca i32, align 4
+ store volatile i32 0, ptr %reg0, align 4
+ store volatile i32 1, ptr %reg1, align 4
+ store volatile i32 2, ptr %reg2, align 4
+ store volatile i32 3, ptr %reg3, align 4
+ store volatile i32 4, ptr %reg4, align 4
+ store volatile i32 5, ptr %reg5, align 4
+ store volatile i32 6, ptr %reg6, align 4
+ store volatile i32 7, ptr %reg7, align 4
+ store volatile i32 8, ptr %reg8, align 4
+ store volatile i32 9, ptr %reg9, align 4
+ store volatile i32 16, ptr %reg10, align 4
+ store volatile i32 17, ptr %reg11, align 4
+ store volatile i32 18, ptr %reg12, align 4
+ store volatile i32 19, ptr %reg13, align 4
+ %0 = load volatile i32, ptr %reg0, align 4
+ %1 = load volatile i32, ptr %reg1, align 4
+ %2 = load volatile i32, ptr %reg2, align 4
+ %3 = load volatile i32, ptr %reg3, align 4
+ %4 = load volatile i32, ptr %reg4, align 4
+ %5 = load volatile i32, ptr %reg5, align 4
+ %6 = load volatile i32, ptr %reg6, align 4
+ %7 = load volatile i32, ptr %reg7, align 4
+ %8 = load volatile i32, ptr %reg8, align 4
+ %9 = load volatile i32, ptr %reg9, align 4
+ %10 = load volatile i32, ptr %reg10, align 4
+ %11 = load volatile i32, ptr %reg11, align 4
+ %12 = load volatile i32, ptr %reg12, align 4
+ %13 = load volatile i32, ptr %reg13, align 4
+ %14 = call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm "nop", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,8,9,10,11,12,13,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13)
+ %asmresult = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 0
+ %asmresult1 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 1
+ %asmresult2 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 2
+ %asmresult3 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 3
+ %asmresult4 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 4
+ %asmresult5 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 5
+ %asmresult6 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 6
+ %asmresult7 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 7
+ %asmresult8 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 8
+ %asmresult9 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 9
+ %asmresult10 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 10
+ %asmresult11 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 11
+ %asmresult12 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 12
+ %asmresult13 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 13
+ store volatile i32 %asmresult, ptr %reg0, align 4
+ store volatile i32 %asmresult1, ptr %reg1, align 4
+ store volatile i32 %asmresult2, ptr %reg2, align 4
+ store volatile i32 %asmresult3, ptr %reg3, align 4
+ store volatile i32 %asmresult4, ptr %reg4, align 4
+ store volatile i32 %asmresult5, ptr %reg5, align 4
+ store volatile i32 %asmresult6, ptr %reg6, align 4
+ store volatile i32 %asmresult7, ptr %reg7, align 4
+ store volatile i32 %asmresult8, ptr %reg8, align 4
+ store volatile i32 %asmresult9, ptr %reg9, align 4
+ store volatile i32 %asmresult10, ptr %reg10, align 4
+ store volatile i32 %asmresult11, ptr %reg11, align 4
+ store volatile i32 %asmresult12, ptr %reg12, align 4
+ store volatile i32 %asmresult13, ptr %reg13, align 4
+ ret i32 0
+}
+
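+; "frame-pointer"="reserved": %rbp is excluded from allocation without any
+; frame-pointer setup being emitted; the expected code matches the
+; "non-leaf" case above.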
+define i32 @test_reserved() "frame-pointer"="reserved" {
+; CHECK-LABEL: test_reserved:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: .cfi_offset %rbx, -48
+; CHECK-NEXT: .cfi_offset %r12, -40
+; CHECK-NEXT: .cfi_offset %r13, -32
+; CHECK-NEXT: .cfi_offset %r14, -24
+; CHECK-NEXT: .cfi_offset %r15, -16
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $4, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $5, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $6, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $7, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $8, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $9, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $16, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $17, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $18, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl $19, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %edx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %esi
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %edi
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r8d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r9d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r10d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r11d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %ebx
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r14d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r15d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r12d
+; CHECK-NEXT: movl -{{[0-9]+}}(%rsp), %r13d
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %esi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %edi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r8d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r9d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r10d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r11d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %ebx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r14d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r15d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r12d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movl %r13d, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 40
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+entry:
+ %reg0 = alloca i32, align 4
+ %reg1 = alloca i32, align 4
+ %reg2 = alloca i32, align 4
+ %reg3 = alloca i32, align 4
+ %reg4 = alloca i32, align 4
+ %reg5 = alloca i32, align 4
+ %reg6 = alloca i32, align 4
+ %reg7 = alloca i32, align 4
+ %reg8 = alloca i32, align 4
+ %reg9 = alloca i32, align 4
+ %reg10 = alloca i32, align 4
+ %reg11 = alloca i32, align 4
+ %reg12 = alloca i32, align 4
+ %reg13 = alloca i32, align 4
+ store volatile i32 0, ptr %reg0, align 4
+ store volatile i32 1, ptr %reg1, align 4
+ store volatile i32 2, ptr %reg2, align 4
+ store volatile i32 3, ptr %reg3, align 4
+ store volatile i32 4, ptr %reg4, align 4
+ store volatile i32 5, ptr %reg5, align 4
+ store volatile i32 6, ptr %reg6, align 4
+ store volatile i32 7, ptr %reg7, align 4
+ store volatile i32 8, ptr %reg8, align 4
+ store volatile i32 9, ptr %reg9, align 4
+ store volatile i32 16, ptr %reg10, align 4
+ store volatile i32 17, ptr %reg11, align 4
+ store volatile i32 18, ptr %reg12, align 4
+ store volatile i32 19, ptr %reg13, align 4
+ %0 = load volatile i32, ptr %reg0, align 4
+ %1 = load volatile i32, ptr %reg1, align 4
+ %2 = load volatile i32, ptr %reg2, align 4
+ %3 = load volatile i32, ptr %reg3, align 4
+ %4 = load volatile i32, ptr %reg4, align 4
+ %5 = load volatile i32, ptr %reg5, align 4
+ %6 = load volatile i32, ptr %reg6, align 4
+ %7 = load volatile i32, ptr %reg7, align 4
+ %8 = load volatile i32, ptr %reg8, align 4
+ %9 = load volatile i32, ptr %reg9, align 4
+ %10 = load volatile i32, ptr %reg10, align 4
+ %11 = load volatile i32, ptr %reg11, align 4
+ %12 = load volatile i32, ptr %reg12, align 4
+ %13 = load volatile i32, ptr %reg13, align 4
+ %14 = call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm "nop", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,8,9,10,11,12,13,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13)
+ %asmresult = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 0
+ %asmresult1 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 1
+ %asmresult2 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 2
+ %asmresult3 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 3
+ %asmresult4 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 4
+ %asmresult5 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 5
+ %asmresult6 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 6
+ %asmresult7 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 7
+ %asmresult8 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 8
+ %asmresult9 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 9
+ %asmresult10 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 10
+ %asmresult11 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 11
+ %asmresult12 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 12
+ %asmresult13 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 13
+ store volatile i32 %asmresult, ptr %reg0, align 4
+ store volatile i32 %asmresult1, ptr %reg1, align 4
+ store volatile i32 %asmresult2, ptr %reg2, align 4
+ store volatile i32 %asmresult3, ptr %reg3, align 4
+ store volatile i32 %asmresult4, ptr %reg4, align 4
+ store volatile i32 %asmresult5, ptr %reg5, align 4
+ store volatile i32 %asmresult6, ptr %reg6, align 4
+ store volatile i32 %asmresult7, ptr %reg7, align 4
+ store volatile i32 %asmresult8, ptr %reg8, align 4
+ store volatile i32 %asmresult9, ptr %reg9, align 4
+ store volatile i32 %asmresult10, ptr %reg10, align 4
+ store volatile i32 %asmresult11, ptr %reg11, align 4
+ store volatile i32 %asmresult12, ptr %reg12, align 4
+ store volatile i32 %asmresult13, ptr %reg13, align 4
+ ret i32 0
+}
+
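+; "frame-pointer"="all": a full frame is established (pushq %rbp;
+; movq %rsp, %rbp), the slots are addressed at fixed offsets from %rbp, and
+; %rbp is never handed out by the allocator.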
+define i32 @test_all() "frame-pointer"="all" {
+; CHECK-LABEL: test_all:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbp, -16
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: .cfi_def_cfa_register %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %r13
+; CHECK-NEXT: pushq %r12
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_offset %rbx, -56
+; CHECK-NEXT: .cfi_offset %r12, -48
+; CHECK-NEXT: .cfi_offset %r13, -40
+; CHECK-NEXT: .cfi_offset %r14, -32
+; CHECK-NEXT: .cfi_offset %r15, -24
+; CHECK-NEXT: movl $0, -96(%rbp)
+; CHECK-NEXT: movl $1, -92(%rbp)
+; CHECK-NEXT: movl $2, -88(%rbp)
+; CHECK-NEXT: movl $3, -84(%rbp)
+; CHECK-NEXT: movl $4, -80(%rbp)
+; CHECK-NEXT: movl $5, -76(%rbp)
+; CHECK-NEXT: movl $6, -72(%rbp)
+; CHECK-NEXT: movl $7, -68(%rbp)
+; CHECK-NEXT: movl $8, -64(%rbp)
+; CHECK-NEXT: movl $9, -60(%rbp)
+; CHECK-NEXT: movl $16, -56(%rbp)
+; CHECK-NEXT: movl $17, -52(%rbp)
+; CHECK-NEXT: movl $18, -48(%rbp)
+; CHECK-NEXT: movl $19, -44(%rbp)
+; CHECK-NEXT: movl -96(%rbp), %eax
+; CHECK-NEXT: movl -92(%rbp), %ecx
+; CHECK-NEXT: movl -88(%rbp), %edx
+; CHECK-NEXT: movl -84(%rbp), %esi
+; CHECK-NEXT: movl -80(%rbp), %edi
+; CHECK-NEXT: movl -76(%rbp), %r8d
+; CHECK-NEXT: movl -72(%rbp), %r9d
+; CHECK-NEXT: movl -68(%rbp), %r10d
+; CHECK-NEXT: movl -64(%rbp), %r11d
+; CHECK-NEXT: movl -60(%rbp), %ebx
+; CHECK-NEXT: movl -56(%rbp), %r14d
+; CHECK-NEXT: movl -52(%rbp), %r15d
+; CHECK-NEXT: movl -48(%rbp), %r12d
+; CHECK-NEXT: movl -44(%rbp), %r13d
+; CHECK-NEXT: #APP
+; CHECK-NEXT: nop
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movl %eax, -96(%rbp)
+; CHECK-NEXT: movl %ecx, -92(%rbp)
+; CHECK-NEXT: movl %edx, -88(%rbp)
+; CHECK-NEXT: movl %esi, -84(%rbp)
+; CHECK-NEXT: movl %edi, -80(%rbp)
+; CHECK-NEXT: movl %r8d, -76(%rbp)
+; CHECK-NEXT: movl %r9d, -72(%rbp)
+; CHECK-NEXT: movl %r10d, -68(%rbp)
+; CHECK-NEXT: movl %r11d, -64(%rbp)
+; CHECK-NEXT: movl %ebx, -60(%rbp)
+; CHECK-NEXT: movl %r14d, -56(%rbp)
+; CHECK-NEXT: movl %r15d, -52(%rbp)
+; CHECK-NEXT: movl %r12d, -48(%rbp)
+; CHECK-NEXT: movl %r13d, -44(%rbp)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r12
+; CHECK-NEXT: popq %r13
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: .cfi_def_cfa %rsp, 8
+; CHECK-NEXT: retq
+entry:
+ %reg0 = alloca i32, align 4
+ %reg1 = alloca i32, align 4
+ %reg2 = alloca i32, align 4
+ %reg3 = alloca i32, align 4
+ %reg4 = alloca i32, align 4
+ %reg5 = alloca i32, align 4
+ %reg6 = alloca i32, align 4
+ %reg7 = alloca i32, align 4
+ %reg8 = alloca i32, align 4
+ %reg9 = alloca i32, align 4
+ %reg10 = alloca i32, align 4
+ %reg11 = alloca i32, align 4
+ %reg12 = alloca i32, align 4
+ %reg13 = alloca i32, align 4
+ store volatile i32 0, ptr %reg0, align 4
+ store volatile i32 1, ptr %reg1, align 4
+ store volatile i32 2, ptr %reg2, align 4
+ store volatile i32 3, ptr %reg3, align 4
+ store volatile i32 4, ptr %reg4, align 4
+ store volatile i32 5, ptr %reg5, align 4
+ store volatile i32 6, ptr %reg6, align 4
+ store volatile i32 7, ptr %reg7, align 4
+ store volatile i32 8, ptr %reg8, align 4
+ store volatile i32 9, ptr %reg9, align 4
+ store volatile i32 16, ptr %reg10, align 4
+ store volatile i32 17, ptr %reg11, align 4
+ store volatile i32 18, ptr %reg12, align 4
+ store volatile i32 19, ptr %reg13, align 4
+ %0 = load volatile i32, ptr %reg0, align 4
+ %1 = load volatile i32, ptr %reg1, align 4
+ %2 = load volatile i32, ptr %reg2, align 4
+ %3 = load volatile i32, ptr %reg3, align 4
+ %4 = load volatile i32, ptr %reg4, align 4
+ %5 = load volatile i32, ptr %reg5, align 4
+ %6 = load volatile i32, ptr %reg6, align 4
+ %7 = load volatile i32, ptr %reg7, align 4
+ %8 = load volatile i32, ptr %reg8, align 4
+ %9 = load volatile i32, ptr %reg9, align 4
+ %10 = load volatile i32, ptr %reg10, align 4
+ %11 = load volatile i32, ptr %reg11, align 4
+ %12 = load volatile i32, ptr %reg12, align 4
+ %13 = load volatile i32, ptr %reg13, align 4
+ %14 = call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm "nop", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,8,9,10,11,12,13,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13)
+ %asmresult = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 0
+ %asmresult1 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 1
+ %asmresult2 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 2
+ %asmresult3 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 3
+ %asmresult4 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 4
+ %asmresult5 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 5
+ %asmresult6 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 6
+ %asmresult7 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 7
+ %asmresult8 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 8
+ %asmresult9 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 9
+ %asmresult10 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 10
+ %asmresult11 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 11
+ %asmresult12 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 12
+ %asmresult13 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %14, 13
+ store volatile i32 %asmresult, ptr %reg0, align 4
+ store volatile i32 %asmresult1, ptr %reg1, align 4
+ store volatile i32 %asmresult2, ptr %reg2, align 4
+ store volatile i32 %asmresult3, ptr %reg3, align 4
+ store volatile i32 %asmresult4, ptr %reg4, align 4
+ store volatile i32 %asmresult5, ptr %reg5, align 4
+ store volatile i32 %asmresult6, ptr %reg6, align 4
+ store volatile i32 %asmresult7, ptr %reg7, align 4
+ store volatile i32 %asmresult8, ptr %reg8, align 4
+ store volatile i32 %asmresult9, ptr %reg9, align 4
+ store volatile i32 %asmresult10, ptr %reg10, align 4
+ store volatile i32 %asmresult11, ptr %reg11, align 4
+ store volatile i32 %asmresult12, ptr %reg12, align 4
+ store volatile i32 %asmresult13, ptr %reg13, align 4
+ ret i32 0
+}