/* Implementation of the getrandom system call.
Copyright (C) 2016-2024 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <sys/random.h>
#include <errno.h>
#include <unistd.h>
#include <sysdep-cancel.h>
#include <sysdep.h>
#include <sysdep-vdso.h>
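/* Issue the getrandom syscall directly. When CANCEL is true the call
goes through the cancellation bridge (SYSCALL_CANCEL) so a pending
cancellation request can be acted upon; otherwise a plain inline
syscall is used. */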
static inline ssize_t
getrandom_syscall (void *buffer, size_t length, unsigned int flags,
bool cancel)
{
return cancel
? SYSCALL_CANCEL (getrandom, buffer, length, flags)
: INLINE_SYSCALL_CALL (getrandom, buffer, length, flags);
}
#ifdef HAVE_GETRANDOM_VSYSCALL
# include <assert.h>
# include <ldsodefs.h>
# include <libc-lock.h>
# include <list.h>
# include <setvmaname.h>
# include <sys/mman.h>
# include <sys/sysinfo.h>
# include <tls-internal.h>
/* These values are initialized at load time by calling
_dl_vdso_getrandom with a special argument (see __getrandom_early_init
below). 'state_size' is the per-thread opaque state size; the states
are allocated with mmap using the 'mmap_prot' and 'mmap_flags'
arguments. */
static uint32_t state_size;
static uint32_t state_size_cache_aligned;
static uint32_t mmap_prot;
static uint32_t mmap_flags;
/* The functions below are used for reentrancy handling, e.g. when a
signal handler installed with SA_NODEFER calls getrandom. Before
allocating a new state or issuing the vDSO call, atomically read the
current thread buffer; if it is already reserved (is_reserved_ptr),
fall back to the syscall. Otherwise, reserve the buffer by atomically
setting the LSB of the opaque state pointer. The bit is cleared after
the vDSO is called, or before issuing the fallback syscall. */
static inline void *reserve_ptr (void *p)
{
return (void *) ((uintptr_t) (p) | 1UL);
}
static inline void *release_ptr (void *p)
{
return (void *) ((uintptr_t) (p) & ~1UL);
}
static inline bool is_reserved_ptr (void *p)
{
return (uintptr_t) (p) & 1UL;
}
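/* The reservation protocol in getrandom_vdso: a state pointer P stored
in self->getrandom_buf is marked in-use by storing reserve_ptr (P),
i.e. P with its LSB set; a nested call from a signal handler then
observes is_reserved_ptr and takes the syscall path instead of reusing
the same state concurrently. */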
static struct
{
__libc_lock_define (, lock);
void **states; /* Queue of opaque states allocated with the kernel-provided
flags and used by the getrandom vDSO call. */
size_t len; /* Number of available free states in the queue. */
size_t total; /* Number of states allocated from the kernel. */
size_t cap; /* Total number of states that 'states' can hold before
it needs to be resized. */
} grnd_alloc = {
.lock = LLL_LOCK_INITIALIZER
};
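/* Grow the free list by one page worth of opaque states mapped with the
kernel-provided mmap_prot and mmap_flags. Called by
vgetrandom_get_state with grnd_alloc.lock held and all signals blocked;
returns false if any mmap fails. */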
static bool
vgetrandom_get_state_alloc (void)
{
/* Start by allocating one page for the opaque states. */
size_t block_size = ALIGN_UP (state_size_cache_aligned, GLRO(dl_pagesize));
size_t states_per_page = GLRO (dl_pagesize) / state_size_cache_aligned;
void *block = __mmap (NULL, GLRO(dl_pagesize), mmap_prot, mmap_flags, -1, 0);
if (block == MAP_FAILED)
return false;
__set_vma_name (block, block_size, " glibc: getrandom");
if (grnd_alloc.total + states_per_page > grnd_alloc.cap)
{
/* Use a new mmap instead of trying to mremap. This avoids a
potential multithreaded fork issue where fork is called just after
mremap returns but before the result is assigned to
grnd_alloc.states, which would leave an invalid value in the
child. */
void *old_states = grnd_alloc.states;
size_t new_states_size = ALIGN_UP ((grnd_alloc.total + states_per_page)
* sizeof (*grnd_alloc.states),
GLRO(dl_pagesize));
/* There is no need to memcpy any opaque state information, because
all previously allocated opaque states are assigned to running
threads (so the list can be reconstructed by iterating over the
threads if needed). */
void **states = __mmap (NULL, new_states_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (states == MAP_FAILED)
{
__munmap (block, block_size);
return false;
}
/* Atomically replace the old array, so that if a fork happens the
child process sees a consistent free-state buffer. The capacity
might not be updated yet, but that does not matter since the buffer
only ever grows. */
grnd_alloc.states = states;
atomic_thread_fence_seq_cst ();
if (old_states != NULL)
__munmap (old_states, grnd_alloc.cap * sizeof (*grnd_alloc.states));
__set_vma_name (states, new_states_size, " glibc: getrandom states");
grnd_alloc.cap = new_states_size / sizeof (*grnd_alloc.states);
atomic_thread_fence_seq_cst ();
}
for (size_t i = 0; i < states_per_page; ++i)
{
/* There is no need to handle states that straddle a page boundary
because we allocate only one page at a time. */
grnd_alloc.states[i] = block;
block += state_size_cache_aligned;
}
/* Concurrent fork should not observe the previous pointer value. */
grnd_alloc.len = states_per_page;
grnd_alloc.total += states_per_page;
atomic_thread_fence_seq_cst ();
return true;
}
/* Allocate an opaque state for vgetrandom. If grnd_alloc has none
available, mmap another page of them using the vgetrandom parameters. */
static void *
vgetrandom_get_state (void)
{
void *state = NULL;
/* Blocking signals avoids the potential issue where _Fork() (which is
async-signal-safe) is called from a signal handler while the lock is
taken. The function is called only once per thread lifetime, so the
overhead should be minimal. */
internal_sigset_t set;
internal_signal_block_all (&set);
__libc_lock_lock (grnd_alloc.lock);
if (grnd_alloc.len > 0 || vgetrandom_get_state_alloc ())
state = grnd_alloc.states[--grnd_alloc.len];
__libc_lock_unlock (grnd_alloc.lock);
internal_signal_restore_set (&set);
return state;
}
/* Fill BUFFER with LENGTH random bytes using the vDSO when possible.
Falls back to the getrandom syscall if the vDSO is not present, on
reentrancy, or if the opaque state allocation fails. Returns the
number of bytes written, or -1 on error (like getrandom). */
static ssize_t
getrandom_vdso (void *buffer, size_t length, unsigned int flags, bool cancel)
{
if (__glibc_unlikely (state_size == 0))
return getrandom_syscall (buffer, length, flags, cancel);
struct pthread *self = THREAD_SELF;
void *state = atomic_load_relaxed (&self->getrandom_buf);
if (is_reserved_ptr (state))
return getrandom_syscall (buffer, length, flags, cancel);
atomic_store_relaxed (&self->getrandom_buf, reserve_ptr (state));
__atomic_signal_fence (__ATOMIC_ACQ_REL);
bool r = false;
if (state == NULL)
{
state = vgetrandom_get_state ();
if (state == NULL)
goto out;
}
/* Since the vDSO implementation does not issue the syscall through the
cancellation bridge (__syscall_cancel_arch), use GRND_NONBLOCK so there
is no potential unbounded blocking in the kernel. Blocking should be
rare, occurring only at system startup before the RNG is initialized. */
long int ret = INTERNAL_VSYSCALL_CALL (GLRO (dl_vdso_getrandom), 5,
buffer,
length,
flags | GRND_NONBLOCK,
state,
state_size);
if (INTERNAL_SYSCALL_ERROR_P (ret))
{
/* Fall back to the syscall if the kernel would block and the caller
did not pass GRND_NONBLOCK itself. */
int err = INTERNAL_SYSCALL_ERRNO (ret);
if (err == EAGAIN && !(flags & GRND_NONBLOCK))
goto out;
__set_errno (err);
ret = -1;
}
r = true;
out:
__atomic_signal_fence (__ATOMIC_ACQ_REL);
atomic_store_relaxed (&self->getrandom_buf, state);
return r ? ret : getrandom_syscall (buffer, length, flags, cancel);
}
#endif
void
__getrandom_early_init (_Bool initial)
{
#ifdef HAVE_GETRANDOM_VSYSCALL
/* libcs loaded for audit modules, dlmopen, etc. fall back to the syscall. */
if (initial && (GLRO (dl_vdso_getrandom) != NULL))
{
/* Used to query the vDSO for the required mmap flags and the opaque
per-thread state size. Defined by linux/random.h. */
struct vgetrandom_opaque_params
{
uint32_t size_of_opaque_state;
uint32_t mmap_prot;
uint32_t mmap_flags;
uint32_t reserved[13];
} params;
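/* Passing a NULL buffer, zero length and flags, and a state size of
~0UL asks the vDSO to fill PARAMS with the allocation parameters
instead of producing random bytes. */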
long int ret = INTERNAL_VSYSCALL_CALL (GLRO(dl_vdso_getrandom),
5, NULL, 0, 0, &params, ~0UL);
if (! INTERNAL_SYSCALL_ERROR_P (ret))
{
/* Align each opaque state to the L1 data cache line size to avoid
false sharing. If the cache line size cannot be obtained, use the
kernel-provided state size unchanged. */
state_size = params.size_of_opaque_state;
long int ld1sz = __sysconf (_SC_LEVEL1_DCACHE_LINESIZE);
if (ld1sz <= 0)
ld1sz = 1;
state_size_cache_aligned = ALIGN_UP (state_size, ld1sz);
/* Do not enable the vDSO if the required opaque state size is larger
than a page, because we only allocate one page at a time to hold
the states. */
if (state_size_cache_aligned > GLRO(dl_pagesize))
{
state_size = 0;
return;
}
mmap_prot = params.mmap_prot;
mmap_flags = params.mmap_flags;
}
}
#endif
}
/* Re-add the state from CURP to the free list. This function is
called after fork returns in the child, so no locking is required. */
void
__getrandom_reset_state (struct pthread *curp)
{
#ifdef HAVE_GETRANDOM_VSYSCALL
if (grnd_alloc.states == NULL || curp->getrandom_buf == NULL)
return;
assert (grnd_alloc.len < grnd_alloc.cap);
grnd_alloc.states[grnd_alloc.len++] = release_ptr (curp->getrandom_buf);
curp->getrandom_buf = NULL;
#endif
}
/* Called when a thread terminates; adds its random buffer back into the
allocator pool for reuse by a future thread. This is called from the
thread-exit path in pthread_create, after signals have been
blocked. */
void
__getrandom_vdso_release (struct pthread *curp)
{
#ifdef HAVE_GETRANDOM_VSYSCALL
if (curp->getrandom_buf == NULL)
return;
__libc_lock_lock (grnd_alloc.lock);
grnd_alloc.states[grnd_alloc.len++] = curp->getrandom_buf;
__libc_lock_unlock (grnd_alloc.lock);
#endif
}
/* Reset the internal lock state in case another thread held the lock
while this thread called fork. The stale thread states are handled by
reclaim_stacks, which calls __getrandom_reset_state on each thread. */
void
__getrandom_fork_subprocess (void)
{
#ifdef HAVE_GETRANDOM_VSYSCALL
grnd_alloc.lock = LLL_LOCK_INITIALIZER;
#endif
}
ssize_t
__getrandom_nocancel (void *buffer, size_t length, unsigned int flags)
{
#ifdef HAVE_GETRANDOM_VSYSCALL
return getrandom_vdso (buffer, length, flags, false);
#else
return getrandom_syscall (buffer, length, flags, false);
#endif
}
/* Write up to LENGTH bytes of randomness starting at BUFFER.
Return the number of bytes written, or -1 on error. */
ssize_t
__getrandom (void *buffer, size_t length, unsigned int flags)
{
#ifdef HAVE_GETRANDOM_VSYSCALL
return getrandom_vdso (buffer, length, flags, true);
#else
return getrandom_syscall (buffer, length, flags, true);
#endif
}
libc_hidden_def (__getrandom)
weak_alias (__getrandom, getrandom)
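/* A minimal usage sketch of the public interface (hypothetical caller,
not part of this file), assuming a kernel that provides the getrandom
syscall:

#include <sys/random.h>
#include <stdio.h>

int
main (void)
{
  unsigned char buf[16];
  ssize_t n = getrandom (buf, sizeof buf, 0);
  if (n < 0)
    perror ("getrandom");
  else
    printf ("read %zd random bytes\n", n);
  return 0;
}

The caller never sees whether the bytes came from the vDSO fast path or
from the syscall fallback. */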