//===- Synchronization.cpp - OpenMP Device synchronization API ---- c++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the device runtime synchronization operations: barriers,
// fences, atomics, and locks.
//
//===----------------------------------------------------------------------===//
#include "Synchronization.h"
#include "Debug.h"
#include "DeviceTypes.h"
#include "DeviceUtils.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"
using namespace ompx;
namespace impl {
/// Atomics
///
///{
///}
/// AMDGCN Implementation
///
///{
#ifdef __AMDGPU__
uint32_t atomicInc(uint32_t *A, uint32_t V, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope) {
  // builtin_amdgcn_atomic_inc32 should expand to this switch when
  // passed a runtime value, but does not do so yet. Workaround here.
#define ScopeSwitch(ORDER) \
  switch (MemScope) { \
  case atomic::MemScopeTy::system: \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, ""); \
  case atomic::MemScopeTy::device: \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "agent"); \
  case atomic::MemScopeTy::workgroup: \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "workgroup"); \
  case atomic::MemScopeTy::wavefront: \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "wavefront"); \
  case atomic::MemScopeTy::single: \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "singlethread"); \
  }
#define Case(ORDER) \
  case ORDER: \
    ScopeSwitch(ORDER)

  switch (Ordering) {
  default:
    __builtin_unreachable();
    Case(atomic::relaxed);
    Case(atomic::acquire);
    Case(atomic::release);
    Case(atomic::acq_rel);
    Case(atomic::seq_cst);
#undef Case
#undef ScopeSwitch
  }
}

[[clang::loader_uninitialized]] Local<uint32_t> namedBarrierTracker;

void namedBarrierInit() {
  // There are no global constructors on the device and shared memory is not
  // zero-initialized, so set the tracker up explicitly.
  atomic::store(&namedBarrierTracker, 0u, atomic::release);
}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);

  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  fence::team(atomic::acquire);

  // Named barrier implementation for AMDGCN.
  // Uses two 16-bit unsigned counters: one for the number of waves that have
  // reached the barrier, and one for how many times the barrier has been
  // passed. They are packed into a single atomically accessed 32-bit integer:
  // the low bits count arriving waves (assumed zero before this call), the
  // high bits count how often the barrier has been passed.
  // Precondition: NumWaves != 0
  // Invariant:    NumWaves * WarpSize == NumThreads
  // Precondition: NumWaves < 0xffffu

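  // For example, with NumWaves == 4 and current generation G (high bits), the
  // first three arriving leaders read low counts 0, 1 and 2 and spin; the
  // fourth reads 3 == NumWaves - 1, stores G + 0x00010000 with the low bits
  // cleared, and the changed generation releases the spinning waves.
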
  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                atomic::relaxed); // commutative

    // Record the number of times the barrier has been passed.
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in the low bits, so this is the last wave.
      // Set the low bits to zero and increment the high bits.
      load += 0x00010000u; // wrap is safe
      load &= 0xffff0000u; // because bits zeroed second

      // Reset the wave counter and release the waiting waves.
      atomic::store(&namedBarrierTracker, load, atomic::relaxed);
    } else {
      // More waves still to go; spin until the generation counter changes.
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, atomic::relaxed);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  fence::team(atomic::release);
}
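
// The three fence flavors map the runtime's team/kernel/system scopes onto the
// corresponding __scoped_atomic_thread_fence scopes (workgroup, device,
// system).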
void fenceTeam(atomic::OrderingTy Ordering) {
  return __scoped_atomic_thread_fence(Ordering, atomic::workgroup);
}

void fenceKernel(atomic::OrderingTy Ordering) {
  return __scoped_atomic_thread_fence(Ordering, atomic::device);
}

void fenceSystem(atomic::OrderingTy Ordering) {
  return __scoped_atomic_thread_fence(Ordering, atomic::system);
}

void syncWarp(__kmpc_impl_lanemask_t) {
  // This is a no-op on current AMDGPU hardware but it is used by the optimizer
  // to enforce convergent behaviour between control flow graphs.
  __builtin_amdgcn_wave_barrier();
}
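
// For non-relaxed orderings, a team fence is emitted before and after
// s_barrier (release/acquire for acq_rel, seq_cst otherwise), so the barrier
// also orders memory accesses within the workgroup.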
void syncThreads(atomic::OrderingTy Ordering) {
  if (Ordering != atomic::relaxed)
    fenceTeam(Ordering == atomic::acq_rel ? atomic::release : atomic::seq_cst);

  __builtin_amdgcn_s_barrier();

  if (Ordering != atomic::relaxed)
    fenceTeam(Ordering == atomic::acq_rel ? atomic::acquire : atomic::seq_cst);
}

void syncThreadsAligned(atomic::OrderingTy Ordering) { syncThreads(Ordering); }

// TODO: Don't have wavefront lane locks. Possibly can't have them.
void unsetLock(omp_lock_t *) { __builtin_trap(); }
int testLock(omp_lock_t *) { __builtin_trap(); }
void initLock(omp_lock_t *) { __builtin_trap(); }
void destroyLock(omp_lock_t *) { __builtin_trap(); }
void setLock(omp_lock_t *) { __builtin_trap(); }

constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

void unsetCriticalLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, atomic::acq_rel);
}
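
// Only the lowest active lane of the wavefront spins on the lock word, so the
// critical lock is contended per wavefront rather than per lane; the
// kernel-scope fences order memory accesses around the lock acquisition.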
void setCriticalLock(omp_lock_t *Lock) {
  uint64_t LowestActiveThread = utils::ffs(mapping::activemask()) - 1;
  if (mapping::getThreadIdInWarp() == LowestActiveThread) {
    fenceKernel(atomic::release);
    while (
        !cas((uint32_t *)Lock, UNSET, SET, atomic::relaxed, atomic::relaxed)) {
      __builtin_amdgcn_s_sleep(32);
    }
    fenceKernel(atomic::acquire);
  }
}

#endif
///}
/// NVPTX Implementation
///
///{
#ifdef __NVPTX__
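
// Note: Ordering and MemScope are accepted for interface symmetry with the
// AMDGCN path but are not forwarded; the NVVM intrinsic is used with its
// default semantics.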
uint32_t atomicInc(uint32_t *Address, uint32_t Val, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}

void namedBarrierInit() {}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  ASSERT(NumThreads % 32 == 0, nullptr);
  // Named barrier used by the active parallel threads of a team in an L1
  // parallel region to synchronize with each other.
  constexpr int BarrierNo = 7;
  __nvvm_barrier_sync_cnt(BarrierNo, NumThreads);
}

void fenceTeam(atomic::OrderingTy) { __nvvm_membar_cta(); }
void fenceKernel(atomic::OrderingTy) { __nvvm_membar_gl(); }
void fenceSystem(atomic::OrderingTy) { __nvvm_membar_sys(); }
void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }
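
// Uses a barrier id distinct from the one used by namedBarrier() (7) so the
// two cannot interfere; the Ordering argument is not consulted here.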
void syncThreads(atomic::OrderingTy Ordering) {
  constexpr int BarrierNo = 8;
  __nvvm_barrier_sync(BarrierNo);
}

void syncThreadsAligned(atomic::OrderingTy Ordering) { __syncthreads(); }

constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

// TODO: This seems to hide a bug in the declare variant handling. If it is
// called before it is defined here, the overload won't happen. Investigate
// later!
void unsetLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, atomic::seq_cst);
}

int testLock(omp_lock_t *Lock) {
  return atomic::add((uint32_t *)Lock, 0u, atomic::seq_cst);
}

void initLock(omp_lock_t *Lock) { unsetLock(Lock); }
void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }
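
// Spin with backoff: between CAS attempts, busy-wait on the clock register for
// a number of cycles proportional to the block id so competing blocks do not
// all retry at the same time.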
void setLock(omp_lock_t *Lock) {
  // TODO: not sure spinning is a good idea here..
  while (atomic::cas((uint32_t *)Lock, UNSET, SET, atomic::seq_cst,
                     atomic::seq_cst) != UNSET) {
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= OMP_SPIN * mapping::getBlockIdInKernel()) {
        break;
      }
    }
  } // wait for 0 to be the read value
}

void unsetCriticalLock(omp_lock_t *Lock) { unsetLock(Lock); }
void setCriticalLock(omp_lock_t *Lock) { setLock(Lock); }

#endif
///}
} // namespace impl
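
// The named barrier tracker only needs explicit initialization in generic
// mode; SPMD kernels synchronize with the aligned hardware barrier instead.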
void synchronize::init(bool IsSPMD) {
  if (!IsSPMD)
    impl::namedBarrierInit();
}

void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }

void synchronize::threads(atomic::OrderingTy Ordering) {
  impl::syncThreads(Ordering);
}

void synchronize::threadsAligned(atomic::OrderingTy Ordering) {
  impl::syncThreadsAligned(Ordering);
}

void fence::team(atomic::OrderingTy Ordering) { impl::fenceTeam(Ordering); }

void fence::kernel(atomic::OrderingTy Ordering) { impl::fenceKernel(Ordering); }

void fence::system(atomic::OrderingTy Ordering) { impl::fenceSystem(Ordering); }

uint32_t atomic::inc(uint32_t *Addr, uint32_t V, atomic::OrderingTy Ordering,
                     atomic::MemScopeTy MemScope) {
  return impl::atomicInc(Addr, V, Ordering, MemScope);
}

void unsetCriticalLock(omp_lock_t *Lock) { impl::unsetLock(Lock); }
void setCriticalLock(omp_lock_t *Lock) { impl::setLock(Lock); }

extern "C" {
void __kmpc_ordered(IdentTy *Loc, int32_t TId) {}
void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) {}
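
// Cancellation is not supported on the device, so this is a plain barrier
// that always reports "not cancelled".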
int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
  __kmpc_barrier(Loc, TId);
  return 0;
}

void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
  if (mapping::isSPMDMode())
    return __kmpc_barrier_simple_spmd(Loc, TId);

  // Generic parallel regions run with a multiple of the warp size or single
  // threaded; in the latter case a flush is all that is needed.
  if (omp_get_num_threads() == 1)
    return __kmpc_flush(Loc);

  impl::namedBarrier();
}

[[clang::noinline]] void __kmpc_barrier_simple_spmd(IdentTy *Loc, int32_t TId) {
  synchronize::threadsAligned(atomic::OrderingTy::seq_cst);
}

[[clang::noinline]] void __kmpc_barrier_simple_generic(IdentTy *Loc,
                                                       int32_t TId) {
  synchronize::threads(atomic::OrderingTy::seq_cst);
}

int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  return omp_get_thread_num() == 0;
}

void __kmpc_end_master(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_masked(IdentTy *Loc, int32_t TId, int32_t Filter) {
  return omp_get_thread_num() == Filter;
}

void __kmpc_end_masked(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  return __kmpc_master(Loc, TId);
}

void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  // The barrier is explicitly called.
}

void __kmpc_flush(IdentTy *Loc) { fence::kernel(atomic::seq_cst); }
uint64_t __kmpc_warp_active_thread_mask(void) { return mapping::activemask(); }
void __kmpc_syncwarp(uint64_t Mask) { synchronize::warp(Mask); }

void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  impl::setCriticalLock(reinterpret_cast<omp_lock_t *>(Name));
}

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  impl::unsetCriticalLock(reinterpret_cast<omp_lock_t *>(Name));
}

void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }
void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }
void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }
void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }
int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }
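
// ompx extension entry points: block-wide barriers with a caller-supplied
// memory ordering. The _divergent variant does not assume that all threads of
// the block reach the call site in lockstep.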
void ompx_sync_block(int Ordering) {
  impl::syncThreadsAligned(atomic::OrderingTy(Ordering));
}

void ompx_sync_block_acq_rel() {
  impl::syncThreadsAligned(atomic::OrderingTy::acq_rel);
}

void ompx_sync_block_divergent(int Ordering) {
  impl::syncThreads(atomic::OrderingTy(Ordering));
}
} // extern "C"