aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTom Stellard <tstellar@redhat.com>2017-11-14 19:21:30 +0000
committerTom Stellard <tstellar@redhat.com>2017-11-14 19:21:30 +0000
commitb32afe4684b90410ba99850c9287e166f9615f1c (patch)
tree1384e63db0a438f7edfb260d1a996115b08446f7
parentdef3a920c374b7c10623cda5de7c38e288878a5e (diff)
downloadllvm-b32afe4684b90410ba99850c9287e166f9615f1c.zip
llvm-b32afe4684b90410ba99850c9287e166f9615f1c.tar.gz
llvm-b32afe4684b90410ba99850c9287e166f9615f1c.tar.bz2
Merging r317115:
------------------------------------------------------------------------ r317115 | jlpeyton | 2017-11-01 12:44:42 -0700 (Wed, 01 Nov 2017) | 19 lines [OpenMP] Fix race condition in omp_init_lock This is a partial fix for bug 34050. This prevents callers of omp_set_lock (which does not hold __kmp_global_lock) from ever seeing an uninitialized version of __kmp_i_lock_table.table. It does not solve a use-after-free race condition if omp_set_lock obtains a pointer to __kmp_i_lock_table.table before it is updated and then attempts to dereference afterwards. That race is far less likely and can be handled in a separate patch. The unit test usually segfaults on the current trunk revision. It passes with the patch. Patch by Adam Azarchs Differential Revision: https://reviews.llvm.org/D39439 ------------------------------------------------------------------------ llvm-svn: 318178
-rw-r--r--openmp/runtime/src/kmp_lock.cpp7
-rw-r--r--openmp/runtime/test/lock/omp_init_lock.c42
2 files changed, 46 insertions, 3 deletions
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index e174981..ecdaca5 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -3061,11 +3061,12 @@ kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
if (idx == __kmp_i_lock_table.size) {
// Double up the space for block pointers
int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
- kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
- __kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(
+ kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate(
2 * row * sizeof(kmp_indirect_lock_t *));
- KMP_MEMCPY(__kmp_i_lock_table.table, old_table,
+ KMP_MEMCPY(new_table, __kmp_i_lock_table.table,
row * sizeof(kmp_indirect_lock_t *));
+ kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
+ __kmp_i_lock_table.table = new_table;
__kmp_free(old_table);
// Allocate new objects in the new blocks
for (int i = row; i < 2 * row; ++i)
diff --git a/openmp/runtime/test/lock/omp_init_lock.c b/openmp/runtime/test/lock/omp_init_lock.c
new file mode 100644
index 0000000..24b60d1
--- /dev/null
+++ b/openmp/runtime/test/lock/omp_init_lock.c
@@ -0,0 +1,42 @@
+// RUN: %libomp-compile-and-run
+#include "omp_testsuite.h"
+#include <stdio.h>
+
+// This should be slightly less than KMP_I_LOCK_CHUNK, which is 1024
+#define LOCKS_PER_ITER 1000
+#define ITERATIONS (REPETITIONS + 1)
+
+// This tests concurrently using locks on one thread while initializing new
+// ones on another thread. This exercises the global lock pool.
+// Regression test for the omp_init_lock race: one thread grows the global
+// indirect-lock table while other threads concurrently set/unset existing
+// locks. Returns 0 (the libomp test convention for this suite appears to
+// treat the return of test_* as the pass/fail code — confirm against
+// omp_testsuite.h).
+int test_omp_init_lock() {
+  int i;
+  omp_lock_t lcks[ITERATIONS * LOCKS_PER_ITER];
+#pragma omp parallel for schedule(static) num_threads(NUM_TASKS)
+  for (i = 0; i < ITERATIONS; i++) {
+    int j;
+    omp_lock_t *my_lcks = &lcks[i * LOCKS_PER_ITER];
+    // Each thread initializes its own chunk of locks; with LOCKS_PER_ITER
+    // near KMP_I_LOCK_CHUNK this forces the runtime's lock table to grow
+    // while other threads are using already-initialized locks.
+    for (j = 0; j < LOCKS_PER_ITER; j++) {
+      omp_init_lock(&my_lcks[j]);
+    }
+    // Hammer the freshly created locks so set/unset overlaps with the
+    // table growth happening on sibling threads.
+    for (j = 0; j < LOCKS_PER_ITER * 100; j++) {
+      omp_set_lock(&my_lcks[j % LOCKS_PER_ITER]);
+      omp_unset_lock(&my_lcks[j % LOCKS_PER_ITER]);
+    }
+  }
+  // All repetitions are done (the parallel region has joined); destroy the
+  // locks. The test exercised growth of the global lock pool, which does
+  // not shrink even after locks are destroyed.
+  {
+    int j;
+    for (j = 0; j < ITERATIONS * LOCKS_PER_ITER; j++) {
+      omp_destroy_lock(&lcks[j]);
+    }
+  }
+
+  return 0;
+}
+
+int main() {
+  // Run the test exactly once: it exercises a process-global lock pool
+  // inside the OpenMP runtime which is never reset, so repeating it within
+  // the same process would not re-exercise the growth path.
+  return test_omp_init_lock();
+}