; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-gnux32 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
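
; These tests check that i256 addition through memory is lowered to an add of
; the lowest word followed by an adc carry chain for the remaining words:
; 32-bit words on i386 (X86), 64-bit words on x86-64 (X64) and the x32 ABI (X32).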
define void @add(ptr %p, ptr %q) nounwind {
; X86-LABEL: add:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl 28(%ecx), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl 24(%ecx), %eax
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-NEXT: movl 20(%ecx), %esi
; X86-NEXT: movl 16(%ecx), %edi
; X86-NEXT: movl 12(%ecx), %ebx
; X86-NEXT: movl 8(%ecx), %ebp
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: movl 4(%ecx), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %edx, (%eax)
; X86-NEXT: adcl %ecx, 4(%eax)
; X86-NEXT: adcl %ebp, 8(%eax)
; X86-NEXT: adcl %ebx, 12(%eax)
; X86-NEXT: adcl %edi, 16(%eax)
; X86-NEXT: adcl %esi, 20(%eax)
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NEXT: adcl %ecx, 24(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: adcl %ecx, 28(%eax)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X32-LABEL: add:
; X32: # %bb.0:
; X32-NEXT: movq 24(%esi), %rax
; X32-NEXT: movq 16(%esi), %rcx
; X32-NEXT: movq (%esi), %rdx
; X32-NEXT: movq 8(%esi), %rsi
; X32-NEXT: addq %rdx, (%edi)
; X32-NEXT: adcq %rsi, 8(%edi)
; X32-NEXT: adcq %rcx, 16(%edi)
; X32-NEXT: adcq %rax, 24(%edi)
; X32-NEXT: retq
;
; X64-LABEL: add:
; X64: # %bb.0:
; X64-NEXT: movq 24(%rsi), %rax
; X64-NEXT: movq 16(%rsi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: movq 8(%rsi), %rsi
; X64-NEXT: addq %rdx, (%rdi)
; X64-NEXT: adcq %rsi, 8(%rdi)
; X64-NEXT: adcq %rcx, 16(%rdi)
; X64-NEXT: adcq %rax, 24(%rdi)
; X64-NEXT: retq
  %a = load i256, ptr %p
  %b = load i256, ptr %q
  %c = add i256 %a, %b
  store i256 %c, ptr %p
  ret void
}
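
; i256 subtraction lowers the same way: sub for the lowest word, then an
; sbb borrow chain for the remaining words.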
define void @sub(ptr %p, ptr %q) nounwind {
; X86-LABEL: sub:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl 28(%ecx), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl 24(%ecx), %eax
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-NEXT: movl 20(%ecx), %esi
; X86-NEXT: movl 16(%ecx), %edi
; X86-NEXT: movl 12(%ecx), %ebx
; X86-NEXT: movl 8(%ecx), %ebp
; X86-NEXT: movl (%ecx), %edx
; X86-NEXT: movl 4(%ecx), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: subl %edx, (%eax)
; X86-NEXT: sbbl %ecx, 4(%eax)
; X86-NEXT: sbbl %ebp, 8(%eax)
; X86-NEXT: sbbl %ebx, 12(%eax)
; X86-NEXT: sbbl %edi, 16(%eax)
; X86-NEXT: sbbl %esi, 20(%eax)
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ecx, 24(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ecx, 28(%eax)
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X32-LABEL: sub:
; X32: # %bb.0:
; X32-NEXT: movq 24(%esi), %rax
; X32-NEXT: movq 16(%esi), %rcx
; X32-NEXT: movq (%esi), %rdx
; X32-NEXT: movq 8(%esi), %rsi
; X32-NEXT: subq %rdx, (%edi)
; X32-NEXT: sbbq %rsi, 8(%edi)
; X32-NEXT: sbbq %rcx, 16(%edi)
; X32-NEXT: sbbq %rax, 24(%edi)
; X32-NEXT: retq
;
; X64-LABEL: sub:
; X64: # %bb.0:
; X64-NEXT: movq 24(%rsi), %rax
; X64-NEXT: movq 16(%rsi), %rcx
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: movq 8(%rsi), %rsi
; X64-NEXT: subq %rdx, (%rdi)
; X64-NEXT: sbbq %rsi, 8(%rdi)
; X64-NEXT: sbbq %rcx, 16(%rdi)
; X64-NEXT: sbbq %rax, 24(%rdi)
; X64-NEXT: retq
  %a = load i256, ptr %p
  %b = load i256, ptr %q
  %c = sub i256 %a, %b
  store i256 %c, ptr %p
  ret void
}