/* Copyright (C) 2005-2022 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the SECTIONS construct.  */

#include "libgomp.h"
#include <string.h>


ialias_redirect (GOMP_taskgroup_reduction_register)

/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_sections_init (struct gomp_work_share *ws, unsigned count)
{
  ws->sched = GFS_DYNAMIC;
  ws->chunk_size = 1;
  ws->end = count + 1L;
  ws->incr = 1;
  ws->next = 1;
#ifdef HAVE_SYNC_BUILTINS
  /* Prepare things to make each iteration faster.  */
  if (sizeof (long) > sizeof (unsigned))
    ws->mode = 1;
  else
    {
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;
      long nthreads = team ? team->nthreads : 1;

      ws->mode = ((nthreads | ws->end)
                  < 1UL << (sizeof (long) * __CHAR_BIT__ / 2 - 1));
    }
#else
  ws->mode = 0;
#endif
}
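
/* Editorial note (not upstream commentary): with COUNT sections the work
   share is simply a dynamic-schedule loop over the half-open interval
   [1, COUNT + 1) with chunk size 1, so each iterator call below hands out
   exactly one section number.  WS->MODE enables the overflow-unchecked
   fetch-and-add fast path in gomp_iter_dynamic_next; it is safe whenever
   both the thread count and the end value fit in half of a long.  */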

/* This routine is called when first encountering a sections construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   COUNT is the number of sections in this construct.

   Returns the 1-based section number for this thread to perform, or 0 if
   all work was assigned to other threads prior to this thread's arrival.  */

unsigned
GOMP_sections_start (unsigned count)
{
  struct gomp_thread *thr = gomp_thread ();
  long s, e, ret;

  if (gomp_work_share_start (0))
    {
      gomp_sections_init (thr->ts.work_share, count);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  if (gomp_iter_dynamic_next (&s, &e))
    ret = s;
  else
    ret = 0;
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  if (gomp_iter_dynamic_next_locked (&s, &e))
    ret = s;
  else
    ret = 0;
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}
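
/* For illustration, a hedged sketch of how the compiler lowers an orphaned
   "#pragma omp sections" with two sections onto these entry points;
   section_1_body and section_2_body are hypothetical names standing in
   for the user's section statements, and this is not verbatim compiler
   output:

     for (i = GOMP_sections_start (2); i != 0; i = GOMP_sections_next ())
       switch (i)
         {
         case 1:
           section_1_body ();
           break;
         case 2:
           section_2_body ();
           break;
         }
     GOMP_sections_end ();
*/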

unsigned
GOMP_sections2_start (unsigned count, uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();
  long s, e, ret;

  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (gomp_work_share_start (0))
    {
      gomp_sections_init (thr->ts.work_share, count);
      if (reductions)
        {
          GOMP_taskgroup_reduction_register (reductions);
          thr->task->taskgroup->workshare = true;
          thr->ts.work_share->task_reductions = reductions;
        }
      if (mem)
        {
          uintptr_t size = (uintptr_t) *mem;
#define INLINE_ORDERED_TEAM_IDS_OFF \
  ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
          if (sizeof (struct gomp_work_share)
              <= INLINE_ORDERED_TEAM_IDS_OFF
              || __alignof__ (struct gomp_work_share) < __alignof__ (long long)
              || size > (sizeof (struct gomp_work_share)
                         - INLINE_ORDERED_TEAM_IDS_OFF))
            *mem
              = (void *) (thr->ts.work_share->ordered_team_ids
                          = gomp_malloc_cleared (size));
          else
            *mem = memset (((char *) thr->ts.work_share)
                           + INLINE_ORDERED_TEAM_IDS_OFF, '\0', size);
        }
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
        {
          uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
          gomp_workshare_task_reduction_register (reductions,
                                                  first_reductions);
        }
      if (mem)
        {
          if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
               & (__alignof__ (long long) - 1)) == 0)
            *mem = (void *) thr->ts.work_share->ordered_team_ids;
          else
            {
              uintptr_t p = (uintptr_t) thr->ts.work_share->ordered_team_ids;
              p += __alignof__ (long long) - 1;
              p &= ~(__alignof__ (long long) - 1);
              *mem = (void *) p;
            }
        }
    }

#ifdef HAVE_SYNC_BUILTINS
  if (gomp_iter_dynamic_next (&s, &e))
    ret = s;
  else
    ret = 0;
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  if (gomp_iter_dynamic_next_locked (&s, &e))
    ret = s;
  else
    ret = 0;
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}
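
/* Editorial note (not upstream commentary): GOMP_sections2_start is the
   OpenMP 5.0 variant of GOMP_sections_start.  REDUCTIONS, if non-NULL,
   points at task-reduction descriptors to register with the implicit
   taskgroup.  MEM, if non-NULL, requests *MEM bytes of zeroed scratch
   space shared by the construct: the first thread reuses the tail of the
   gomp_work_share structure (the inline_ordered_team_ids area, rounded up
   to long long alignment) when it is large and aligned enough, and falls
   back to gomp_malloc_cleared otherwise; later arrivals recompute the
   same aligned pointer instead of allocating again.  */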

/* This routine is called when the thread completes processing of the
   section currently assigned to it.  If the work-share construct is
   bound directly to a parallel construct, then the construct may have
   been set up before the parallel, in which case this may be the
   first iteration for the thread.

   Returns the 1-based section number for this thread to perform, or 0 if
   all work was assigned to other threads prior to this thread's arrival.  */

unsigned
GOMP_sections_next (void)
{
  long s, e, ret;

#ifdef HAVE_SYNC_BUILTINS
  if (gomp_iter_dynamic_next (&s, &e))
    ret = s;
  else
    ret = 0;
#else
  struct gomp_thread *thr = gomp_thread ();

  gomp_mutex_lock (&thr->ts.work_share->lock);
  if (gomp_iter_dynamic_next_locked (&s, &e))
    ret = s;
  else
    ret = 0;
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

/* This routine pre-initializes a work-share construct to avoid one
   synchronization once we get into the loop.  */

void
GOMP_parallel_sections_start (void (*fn) (void *), void *data,
                              unsigned num_threads, unsigned count)
{
  struct gomp_team *team;

  num_threads = gomp_resolve_num_threads (num_threads, count);
  team = gomp_new_team (num_threads);
  gomp_sections_init (&team->work_shares[0], count);
  gomp_team_start (fn, data, num_threads, 0, team, NULL);
}
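
/* Hedged sketch (editorial): this is the older entry point for the combined
   "#pragma omp parallel sections"; the caller is expected to follow the
   pattern

     GOMP_parallel_sections_start (subfn, data, 0, 2);
     subfn (data);
     GOMP_parallel_end ();

   where subfn is the outlined parallel body containing a
   GOMP_sections_next loop.  Because the work share is pre-initialized
   here, subfn does not call GOMP_sections_start, and a num_threads
   argument of 0 requests the default thread count.  */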

ialias_redirect (GOMP_parallel_end)

void
GOMP_parallel_sections (void (*fn) (void *), void *data,
                        unsigned num_threads, unsigned count, unsigned flags)
{
  struct gomp_team *team;

  num_threads = gomp_resolve_num_threads (num_threads, count);
  team = gomp_new_team (num_threads);
  gomp_sections_init (&team->work_shares[0], count);
  gomp_team_start (fn, data, num_threads, flags, team, NULL);
  fn (data);
  GOMP_parallel_end ();
}
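
/* Hedged sketch (editorial): with the current ABI the combined construct
   lowers to a single call, e.g. for two sections with no explicit
   num_threads or clauses:

     GOMP_parallel_sections (subfn, data, 0, 2, 0);

   which starts the team, runs subfn in the calling thread as well, and
   then joins the team via GOMP_parallel_end.  */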

/* The GOMP_section_end* routines are called after the thread is told
   that all sections are complete.  The first two versions synchronize
   all threads; the nowait version does not.  */

void
GOMP_sections_end (void)
{
  gomp_work_share_end ();
}

bool
GOMP_sections_end_cancel (void)
{
  return gomp_work_share_end_cancel ();
}

void
GOMP_sections_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}
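
/* Editorial note: GOMP_sections_end_cancel is emitted instead of
   GOMP_sections_end when cancellation of the construct is possible; a
   true return value means the binding region was cancelled, so the
   generated code branches to the cancellation exit, roughly (a sketch,
   with cancel_exit a hypothetical label):

     if (GOMP_sections_end_cancel ())
       goto cancel_exit;
*/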