1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
|
//===- Synchronization.cpp - OpenMP Device synchronization API ---- c++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Include all synchronization.
//
//===----------------------------------------------------------------------===//
#include "Synchronization.h"
#include "Debug.h"
#include "DeviceTypes.h"
#include "DeviceUtils.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"
using namespace ompx;
namespace impl {
/// Atomics
///
///{
///}
/// AMDGCN Implementation
///
///{
#ifdef __AMDGPU__
// Shared-memory word backing the software named barrier on AMDGCN; see
// namedBarrier() below for the bit layout. Marked loader_uninitialized
// because device shared memory has no static initialization.
[[clang::loader_uninitialized]] Local<uint32_t> namedBarrierTracker;

/// Reset the named-barrier tracker word to zero before first use.
void namedBarrierInit() {
  // Don't have global ctors, and shared memory is not zero init
  atomic::store(&namedBarrierTracker, 0u, atomic::release);
}
/// Software barrier for the active threads of a generic-mode parallel region
/// on AMDGCN, which has no hardware named-barrier instruction. Synchronizes
/// omp_get_num_threads() threads via a single shared counter word.
void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);
  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  // Make writes by other threads visible before we wait (pairs with the
  // release fence at the end of this function in other waves).
  fence::team(atomic::acquire);

  // named barrier implementation for amdgcn.
  // Uses two 16 bit unsigned counters. One for the number of waves to have
  // reached the barrier, and one to count how many times the barrier has been
  // passed. These are packed in a single atomically accessed 32 bit integer.
  // Low bits for the number of waves, assumed zero before this call.
  // High bits to count the number of times the barrier has been passed.
  // precondition: NumWaves != 0;
  // invariant: NumWaves * WarpSize == NumThreads;
  // precondition: NumWaves < 0xffffu;

  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                atomic::relaxed); // commutative

    // Record the number of times the barrier has been passed
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in low bits so this is the last wave.
      // Set low bits to zero and increment high bits
      load += 0x00010000u; // wrap is safe
      load &= 0xffff0000u; // because bits zeroed second

      // Reset the wave counter and release the waiting waves
      atomic::store(&namedBarrierTracker, load, atomic::relaxed);
    } else {
      // more waves still to go, spin until generation counter changes
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, atomic::relaxed);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  // Publish this thread's writes to the rest of the team.
  fence::team(atomic::release);
}
/// Aligned team synchronization on AMDGCN: no cheaper specialized form is
/// used here, so forward to the generic team-wide thread barrier.
void syncThreadsAligned(atomic::OrderingTy Ordering) {
  synchronize::threads(Ordering);
}
// TODO: Don't have wavefront lane locks. Possibly can't have them.
// The generic omp_lock_t API is therefore unimplemented on AMDGCN;
// any call to one of these stubs aborts the kernel.
void unsetLock(omp_lock_t *) { __builtin_trap(); }
int testLock(omp_lock_t *) { __builtin_trap(); }
void initLock(omp_lock_t *) { __builtin_trap(); }
void destroyLock(omp_lock_t *) { __builtin_trap(); }
void setLock(omp_lock_t *) { __builtin_trap(); }
// Values stored in the critical-section lock word.
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

/// Release the critical-section lock by swapping UNSET into the lock word
/// with acquire-release ordering.
void unsetCriticalLock(omp_lock_t *Lock) {
  [[maybe_unused]] uint32_t before =
      atomicExchange((uint32_t *)Lock, UNSET, atomic::acq_rel);
}
/// Acquire the critical-section lock. Only the lowest-numbered active lane
/// of the warp spins on the lock word; the relaxed CAS is ordered by the
/// explicit kernel-scope fences around the loop.
void setCriticalLock(omp_lock_t *Lock) {
  uint64_t LowestActiveThread = utils::ffs(mapping::activemask()) - 1;
  if (mapping::getThreadIdInWarp() == LowestActiveThread) {
    fence::kernel(atomic::release);
    // Spin until the CAS UNSET -> SET succeeds, sleeping between attempts.
    while (
        !cas((uint32_t *)Lock, UNSET, SET, atomic::relaxed, atomic::relaxed)) {
      __builtin_amdgcn_s_sleep(32);
    }
    fence::kernel(atomic::acquire);
  }
}
#endif
///}
/// NVPTX Implementation
///
///{
#ifdef __NVPTX__
/// No setup required: NVPTX provides hardware named barriers.
void namedBarrierInit() {}

/// Barrier for the active parallel threads of a team, implemented on a
/// dedicated hardware barrier resource.
void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // The barrier instruction counts in warp granularity.
  ASSERT(NumThreads % 32 == 0, nullptr);

  // The named barrier for active parallel threads of a team in an L1 parallel
  // region to synchronize with each other.
  constexpr int BarrierNo = 7;
  __nvvm_barrier_sync_cnt(BarrierNo, NumThreads);
}
/// Aligned synchronization: all threads of the CTA reach this point, so a
/// plain __syncthreads suffices regardless of the requested ordering.
void syncThreadsAligned(atomic::OrderingTy Ordering) { __syncthreads(); }

// Spin-backoff budget (in clock cycles) and lock word values.
constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

/// Release the lock by swapping UNSET into the lock word.
void unsetLock(omp_lock_t *Lock) {
  [[maybe_unused]] uint32_t before = atomicExchange(
      reinterpret_cast<uint32_t *>(Lock), UNSET, atomic::seq_cst);
}

/// Read the lock word (atomic add of 0) and return it; nonzero means held.
int testLock(omp_lock_t *Lock) {
  return atomic::add(reinterpret_cast<uint32_t *>(Lock), 0u, atomic::seq_cst);
}

// Locks are initialized and destroyed by forcing them into the unset state.
void initLock(omp_lock_t *Lock) { unsetLock(Lock); }
void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }
/// Acquire the lock, spinning with a clock-based backoff between CAS
/// attempts.
void setLock(omp_lock_t *Lock) {
  // TODO: not sure spinning is a good idea here..
  while (atomic::cas(reinterpret_cast<uint32_t *>(Lock), UNSET, SET,
                     atomic::seq_cst, atomic::seq_cst) != UNSET) {
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      // Elapsed cycles, compensating for one wrap of the 32-bit clock.
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      // NOTE(review): the backoff budget scales with the block id, so block
      // 0 performs no backoff at all — presumably intended to stagger
      // contention across blocks; confirm intent.
      if (cycles >= OMP_SPIN * mapping::getBlockIdInKernel()) {
        break;
      }
    }
  } // wait for 0 to be the read value
}

// Critical sections reuse the generic lock implementation on NVPTX.
void unsetCriticalLock(omp_lock_t *Lock) { unsetLock(Lock); }
void setCriticalLock(omp_lock_t *Lock) { setLock(Lock); }
#endif
///}
#if defined(__SPIRV__)
// Named barriers are not implemented for SPIR-V yet; any use aborts.
void namedBarrierInit() { __builtin_trap(); } // TODO
void namedBarrier() { __builtin_trap(); } // TODO

/// Release the lock by storing 0 into the lock word.
void unsetLock(omp_lock_t *Lock) {
  atomic::store((int32_t *)Lock, 0, atomic::seq_cst);
}

/// Read the lock word (atomic add of 0) and return it; nonzero means held.
int testLock(omp_lock_t *Lock) {
  return atomic::add((int32_t *)Lock, 0, atomic::seq_cst);
}

// Locks are initialized and destroyed by forcing them into the unset state.
void initLock(omp_lock_t *Lock) { unsetLock(Lock); }
void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }
/// Acquire the lock via test-and-test-and-set: spin on plain atomic loads
/// until the word reads 0, then attempt the CAS 0 -> 1; retry on failure.
void setLock(omp_lock_t *Lock) {
  int32_t *Word = (int32_t *)Lock;
  for (;;) {
    // Cheap read first; only attempt the CAS when the lock looks free.
    if (atomic::load(Word, atomic::seq_cst) != 0)
      continue;
    if (atomic::cas(Word, 0, 1, atomic::seq_cst, atomic::seq_cst))
      return;
  }
}
// Critical sections reuse the generic lock implementation on SPIR-V.
void unsetCriticalLock(omp_lock_t *Lock) { unsetLock(Lock); }
void setCriticalLock(omp_lock_t *Lock) { setLock(Lock); }

/// No cheaper aligned form; forward to the generic team barrier.
void syncThreadsAligned(atomic::OrderingTy Ordering) {
  synchronize::threads(Ordering);
}
#endif
} // namespace impl
/// One-time synchronization setup. Generic mode needs the named-barrier
/// state initialized; SPMD mode needs nothing.
void synchronize::init(bool IsSPMD) {
  if (IsSPMD)
    return;
  impl::namedBarrierInit();
}
/// Synchronize all threads of the block, assuming all of them reach this
/// point (aligned arrival).
void synchronize::threadsAligned(atomic::OrderingTy Ordering) {
  impl::syncThreadsAligned(Ordering);
}
/// Acquire/release the lock guarding a critical region. These must forward
/// to the critical-lock implementation — exactly as __kmpc_critical and
/// __kmpc_end_critical do below — not to the generic impl::setLock /
/// impl::unsetLock, which on AMDGPU are unimplemented __builtin_trap()
/// stubs and would abort the kernel.
void unsetCriticalLock(omp_lock_t *Lock) { impl::unsetCriticalLock(Lock); }
void setCriticalLock(omp_lock_t *Lock) { impl::setCriticalLock(Lock); }
extern "C" {
// The `ordered` construct is a no-op in the device runtime.
void __kmpc_ordered(IdentTy *Loc, int32_t TId) {}
void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) {}
/// Cancellation barrier: performs a plain barrier and always reports
/// "not cancelled" (0).
int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
  __kmpc_barrier(Loc, TId);
  return 0;
}
/// Full barrier for the threads of the current parallel region.
void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
  // SPMD mode: every thread participates, use the cheap aligned barrier.
  if (mapping::isSPMDMode()) {
    __kmpc_barrier_simple_spmd(Loc, TId);
    return;
  }
  // Generic parallel regions are run with a multiple of the warp size or
  // single threaded; in the single-threaded case only a flush is required.
  if (omp_get_num_threads() == 1) {
    __kmpc_flush(Loc);
    return;
  }
  impl::namedBarrier();
}
/// Aligned SPMD barrier with sequentially-consistent ordering.
// NOTE(review): noinline presumably keeps the call recognizable to compiler
// passes that pattern-match these barrier entry points — confirm.
[[clang::noinline]] void __kmpc_barrier_simple_spmd(IdentTy *Loc, int32_t TId) {
  synchronize::threadsAligned(atomic::OrderingTy::seq_cst);
}
/// Generic-mode barrier with sequentially-consistent ordering; does not
/// assume aligned arrival.
[[clang::noinline]] void __kmpc_barrier_simple_generic(IdentTy *Loc,
                                                       int32_t TId) {
  synchronize::threads(atomic::OrderingTy::seq_cst);
}
/// Return 1 if the calling thread is the master thread (id 0), 0 otherwise.
int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  const int32_t ThreadId = omp_get_thread_num();
  return ThreadId == 0 ? 1 : 0;
}

/// End of a master region: nothing to do.
void __kmpc_end_master(IdentTy *Loc, int32_t TId) {}
/// Return 1 if the calling thread's id equals \p Filter, 0 otherwise.
int32_t __kmpc_masked(IdentTy *Loc, int32_t TId, int32_t Filter) {
  const bool IsSelected = omp_get_thread_num() == Filter;
  return IsSelected;
}

/// End of a masked region: nothing to do.
void __kmpc_end_masked(IdentTy *Loc, int32_t TId) {}
/// `single` is implemented as `master`: only thread 0 executes the region.
int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  return __kmpc_master(Loc, TId);
}

void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  // The barrier is explicitly called.
}
/// OpenMP flush: a kernel-wide sequentially-consistent fence.
void __kmpc_flush(IdentTy *Loc) { fence::kernel(atomic::seq_cst); }

/// Return the mask of currently active lanes in the warp.
uint64_t __kmpc_warp_active_thread_mask(void) { return mapping::activemask(); }

/// Synchronize the warp lanes named in \p Mask.
void __kmpc_syncwarp(uint64_t Mask) { synchronize::warp(Mask); }
/// Enter the named critical region by acquiring its lock (the critical name
/// storage is reinterpreted as the lock word).
void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  impl::setCriticalLock(reinterpret_cast<omp_lock_t *>(Name));
}

/// Leave the named critical region by releasing its lock.
void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  impl::unsetCriticalLock(reinterpret_cast<omp_lock_t *>(Name));
}
void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }
void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }
void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }
void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }
int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }
/// Block-wide aligned synchronization with a caller-supplied ordering.
void ompx_sync_block(int Ordering) {
  impl::syncThreadsAligned(atomic::OrderingTy(Ordering));
}

/// Block-wide aligned synchronization with acquire-release ordering.
void ompx_sync_block_acq_rel() {
  impl::syncThreadsAligned(atomic::OrderingTy::acq_rel);
}

// NOTE(review): the name suggests this variant tolerates divergent
// (non-aligned) arrival, hence the generic barrier — confirm.
void ompx_sync_block_divergent(int Ordering) {
  synchronize::threads(atomic::OrderingTy(Ordering));
}
} // extern "C"
|