1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
|
/* PR target/98737 */
/* { dg-do compile { target i?86-*-* x86_64-*-* powerpc*-*-* aarch64*-*-* } } */
/* { dg-options "-O2 -fdump-tree-optimized -fcompare-debug" } */
/* { dg-additional-options "-march=i686" { target ia32 } } */
/* { dg-final { scan-tree-dump-not "__atomic_\[^f]" "optimized" } } */
/* { dg-final { scan-tree-dump-not "__sync_\[^f]" "optimized" } } */
/* Shorthand typedefs so the A/B macros below can name both the signed
   and the corresponding unsigned flavour of each integer width via
   token pasting (v##t, etc.).  */
typedef signed char schar;
typedef unsigned long ulong;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned char uchar;
/* One file-scope variable of each tested type; every generated fnNN
   performs its atomic operation on the matching v<type> object.  */
long vlong;
int vint;
short vshort;
schar vschar;
ulong vulong;
uint vuint;
ushort vushort;
uchar vuchar;
/* A(n, t, ut, f, o, ...) emits function fn<n>: it applies op-and-fetch
   builtin F to the global v<t> with operand X, then undoes the operation
   with the inverse operator O, so the function returns the pre-operation
   value of v<t>.  The optimizer should therefore replace each
   __atomic_<op>_fetch / __sync_<op>_and_fetch with the corresponding
   fetch-first form, which is what the scan-tree-dump-not patterns at the
   top of the file verify (only "__atomic_f..." / "__sync_f..." may
   remain in the optimized dump).  */
#define A(n, t, ut, f, o, ...) \
t fn##n (t x) \
{ \
ut z = f (&v##t, x, ##__VA_ARGS__); \
t w = (t) z; \
return w o x; \
}
/* B stamps out one A instance per tested type, signed and unsigned;
   the second digit of the function number selects the type.  */
#define B(n, f, o, ...) \
A(n##0, long, ulong, f, o, ##__VA_ARGS__) \
A(n##1, int, uint, f, o, ##__VA_ARGS__) \
A(n##2, short, ushort, f, o, ##__VA_ARGS__) \
A(n##3, schar, uchar, f, o, ##__VA_ARGS__) \
A(n##4, ulong, ulong, f, o, ##__VA_ARGS__) \
A(n##5, uint, uint, f, o, ##__VA_ARGS__) \
A(n##6, ushort, ushort, f, o, ##__VA_ARGS__) \
A(n##7, uchar, uchar, f, o, ##__VA_ARGS__)
/* fn00x-fn08x: variable operand, result returned directly.
   NOTE(review): numbering gaps (02, 04, 07) presumably correspond to
   and/or/nand variants covered by a sibling test file — confirm.  */
B(00, __atomic_add_fetch, -, __ATOMIC_RELAXED)
B(01, __atomic_sub_fetch, +, __ATOMIC_RELAXED)
B(03, __atomic_xor_fetch, ^, __ATOMIC_RELAXED)
B(05, __sync_add_and_fetch, -)
B(06, __sync_sub_and_fetch, +)
B(08, __sync_xor_and_fetch, ^)
#undef A
/* Same shape as the first A, but with the constant operand 42 instead of
   a function argument: fn1<m>() returns the pre-operation value of v<t>.
   Exercises the optimization when the operand is a literal.  */
#define A(n, t, ut, f, o, ...) \
t fn##n (void) \
{ \
ut z = f (&v##t, 42, ##__VA_ARGS__); \
t w = (t) z; \
return w o 42; \
}
B(10, __atomic_add_fetch, -, __ATOMIC_RELAXED)
B(11, __atomic_sub_fetch, +, __ATOMIC_RELAXED)
B(13, __atomic_xor_fetch, ^, __ATOMIC_RELAXED)
B(15, __sync_add_and_fetch, -)
B(16, __sync_sub_and_fetch, +)
B(18, __sync_xor_and_fetch, ^)
#undef A
/* Variant: the undone result is compared against zero (v == 0), i.e.
   "did the atomic operation start from a value that makes the result
   cancel out".  Exercises the optimization when only a comparison of
   the recovered pre-op value is used.  */
#define A(n, t, ut, f, o, ...) \
t fn##n (t x) \
{ \
ut z = f (&v##t, x, ##__VA_ARGS__); \
t w = (t) z; \
t v = w o x; \
return v == 0; \
}
B(20, __atomic_add_fetch, -, __ATOMIC_RELAXED)
B(21, __atomic_sub_fetch, +, __ATOMIC_RELAXED)
B(23, __atomic_xor_fetch, ^, __ATOMIC_RELAXED)
B(25, __sync_add_and_fetch, -)
B(26, __sync_sub_and_fetch, +)
B(28, __sync_xor_and_fetch, ^)
#undef A
/* Variant: constant operand 42 and an inequality comparison (v != 0)
   of the recovered pre-operation value.  */
#define A(n, t, ut, f, o, ...) \
t fn##n (void) \
{ \
ut z = f (&v##t, 42, ##__VA_ARGS__); \
t w = (t) z; \
t v = w o 42; \
return v != 0; \
}
B(30, __atomic_add_fetch, -, __ATOMIC_RELAXED)
B(31, __atomic_sub_fetch, +, __ATOMIC_RELAXED)
B(33, __atomic_xor_fetch, ^, __ATOMIC_RELAXED)
B(35, __sync_add_and_fetch, -)
B(36, __sync_sub_and_fetch, +)
B(38, __sync_xor_and_fetch, ^)
#undef A
/* Variant: the whole compute-undo-compare sequence folded into one
   expression (no named temporaries), variable operand, "!= 0" test.
   Checks that the match does not depend on separate SSA statements
   existing for each step.  */
#define A(n, t, ut, f, o, ...) \
t fn##n (t x) \
{ \
return (t) (((t) f (&v##t, x, ##__VA_ARGS__)) \
o x) != 0; \
}
B(40, __atomic_add_fetch, -, __ATOMIC_RELAXED)
B(41, __atomic_sub_fetch, +, __ATOMIC_RELAXED)
B(43, __atomic_xor_fetch, ^, __ATOMIC_RELAXED)
B(45, __sync_add_and_fetch, -)
B(46, __sync_sub_and_fetch, +)
B(48, __sync_xor_and_fetch, ^)
#undef A
/* Variant: single-expression form with the constant operand 42 and an
   "== 0" test of the recovered pre-operation value.  */
#define A(n, t, ut, f, o, ...) \
t fn##n (void) \
{ \
return (t) (((t) f (&v##t, 42, ##__VA_ARGS__))\
o 42) == 0; \
}
B(50, __atomic_add_fetch, -, __ATOMIC_RELAXED)
B(51, __atomic_sub_fetch, +, __ATOMIC_RELAXED)
B(53, __atomic_xor_fetch, ^, __ATOMIC_RELAXED)
B(55, __sync_add_and_fetch, -)
B(56, __sync_sub_and_fetch, +)
B(58, __sync_xor_and_fetch, ^)
|