/*
* Support file for amdgcn in newlib.
* Copyright (c) 2024 BayLibre.
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
/* Lock routines for AMD GPU devices.
The lock is a 32-bit int:
- bits 0-3: wavefront id
- bits 4-23: workgroup id (+1, so never zero)
- bits 24-31: recursive lock count.
The purpose of the "relaxed" loads and stores being "atomic" here is
mostly just to ensure we punch through the caches consistently.
Non-recursive locks may be unlocked by any thread. It's an error to
attempt to unlock a recursive lock from the wrong thread.
The DEBUG statements here use sprintf and write to avoid taking locks
themselves. */
#include <sys/lock.h>
#include <assert.h>
#define DEBUG 0
#if DEBUG
extern void write(int, char *, int);
#endif
/* Build a unique, never-zero identifier for the calling wavefront.
   Dim(0) is the workgroup ID (0 up to possibly thousands);
   Dim(1) is the wavefront ID within the group (0 to 15).
   The workgroup ID is biased by one so the combined value can never
   collide with the "unlocked" state of zero.  */
static unsigned
__gcn_thread_id ()
{
  unsigned group = __builtin_gcn_dim_pos (0);
  unsigned wave = __builtin_gcn_dim_pos (1);
  return ((group + 1) << 4) + wave;
}
/* Try to take the lock by swapping our thread ID into a free (zero) lock
   word.  If _TRY is nonzero, give up after one attempt; otherwise spin,
   sleeping briefly between attempts, until the lock is ours.
   Returns 1 when the lock was acquired, 0 when _TRY failed.  */
static int
__gcn_lock_acquire_int (_LOCK_T *lock_ptr, int _try)
{
  int id = __gcn_thread_id ();
#if DEBUG
  char buf[1000];
  __builtin_sprintf (buf,"acquire:%p(%d) lock_value:0x%x id:0x%x", lock_ptr,
		     _try, *lock_ptr, id);
  write (1, buf, __builtin_strlen(buf));
#endif
  for (;;)
    {
      /* The CAS rewrites 'expected' on failure, so it must be reset to
	 the unlocked value before each attempt.  */
      int expected = 0;
      if (__atomic_compare_exchange_n (lock_ptr, &expected, id, 0,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
	break;
      /* Lock *not* acquired.  */
      if (_try)
	return 0;
      asm ("s_sleep 64");
    }
#if DEBUG
  __builtin_sprintf (buf,"acquired:%p(%d) lock_value:0x%x id:0x%x", lock_ptr,
		     _try, *lock_ptr, id);
  write (1, buf, __builtin_strlen(buf));
#endif
  return 1;
}
/* Attempt a single, non-blocking lock acquisition.
   Returns 1 on success, 0 if the lock was busy.  */
int
__gcn_try_lock_acquire (_LOCK_T *lock_ptr)
{
  int acquired = __gcn_lock_acquire_int (lock_ptr, 1);
  return acquired;
}
/* Acquire the lock, blocking (spinning) until it is available.  */
void
__gcn_lock_acquire (_LOCK_T *lock_ptr)
{
  /* Cannot fail in blocking mode, so the result is ignored.  */
  (void) __gcn_lock_acquire_int (lock_ptr, 0);
}
/* Acquire a recursive lock.  If the calling thread already owns the lock,
   bump the recursion count stored in the top byte; otherwise fall back to
   a plain acquisition.  _TRY selects non-blocking behaviour for the
   fallback path.  Returns 1 on success, 0 when _TRY failed.
   Only the owner ever rewrites the count, so relaxed atomics suffice.  */
static int
__gcn_lock_acquire_recursive_int (_LOCK_T *lock_ptr, int _try)
{
  int id = __gcn_thread_id ();
#if DEBUG
  char buf[1000];
  __builtin_sprintf (buf,"acquire recursive:%p(%d) lock_value:0x%x id:0x%x",
		     lock_ptr, _try, *lock_ptr, id);
  write (1, buf, __builtin_strlen(buf));
#endif
  unsigned int lock_value = __atomic_load_n (lock_ptr, __ATOMIC_RELAXED);
  if ((lock_value & 0xffffff) != id)
    /* Some other thread (or nobody) holds it; contend normally.  */
    return __gcn_lock_acquire_int (lock_ptr, _try);

  /* This thread already holds the lock.
     Increment the recursion counter and update the lock.  */
  unsigned int count = (lock_value >> 24) + 1;
  __atomic_store_n (lock_ptr, (count << 24) | id, __ATOMIC_RELAXED);
#if DEBUG
  __builtin_sprintf (buf,
		     "increment recursive:%p(%d) lock_value:0x%x id:0x%x",
		     lock_ptr, _try, *lock_ptr, id);
  write (1, buf, __builtin_strlen(buf));
#endif
  return 1;
}
/* Non-blocking recursive acquisition; returns 1 on success, 0 if busy.  */
int
__gcn_lock_try_acquire_recursive (_LOCK_T *lock_ptr)
{
  int acquired = __gcn_lock_acquire_recursive_int (lock_ptr, 1);
  return acquired;
}
/* Blocking recursive acquisition.  */
void
__gcn_lock_acquire_recursive (_LOCK_T *lock_ptr)
{
  /* Cannot fail in blocking mode, so the result is ignored.  */
  (void) __gcn_lock_acquire_recursive_int (lock_ptr, 0);
}
/* Release a non-recursive lock by resetting it to the unlocked state.
   Any thread may do this; ownership is not checked here.  */
void
__gcn_lock_release (_LOCK_T *lock_ptr)
{
#if DEBUG
  char buf[1000];
  __builtin_sprintf (buf,"release:%p lock_value:0x%x id:0x%x", lock_ptr,
		     *lock_ptr, __gcn_thread_id());
  write (1, buf, __builtin_strlen(buf));
#endif
  /* Release ordering publishes the critical section to the next owner.  */
  __atomic_store_n (lock_ptr, 0, __ATOMIC_RELEASE);
}
/* Release one level of a recursive lock.  While the recursion count in
   the top byte is nonzero, only decrement it; the final release hands the
   lock word back to zero via __gcn_lock_release.
   It is an error to call this function from a thread that does not hold
   the lock (checked by assert).  */
void
__gcn_lock_release_recursive (_LOCK_T *lock_ptr)
{
  int id = __gcn_thread_id ();
  unsigned int lock_value = __atomic_load_n (lock_ptr, __ATOMIC_RELAXED);
#if DEBUG
  char buf[1000];
  __builtin_sprintf (buf, "release recursive:%p lock_value:0x%x id:0x%x",
		     lock_ptr, lock_value, id);
  write (1, buf, __builtin_strlen(buf));
#endif
  /* It is an error to call this function from the wrong thread.  */
  assert ((lock_value & 0xffffff) == id);
  unsigned int count = lock_value >> 24;
  if (count == 0)
    {
      /* Outermost release: drop the lock entirely.  */
      __gcn_lock_release (lock_ptr);
      return;
    }
  /* Still held recursively: just lower the count, keep ownership.  */
  __atomic_store_n (lock_ptr, ((count - 1) << 24) | id, __ATOMIC_RELAXED);
#if DEBUG
  __builtin_sprintf (buf, "decrement recursive:%p lock_value:0x%x id:0x%x",
		     lock_ptr, *lock_ptr, id);
  write (1, buf, __builtin_strlen(buf));
#endif
}