Diffstat (limited to 'libgomp/config/linux/bar.c')
-rw-r--r--  libgomp/config/linux/bar.c | 106
1 file changed, 96 insertions(+), 10 deletions(-)
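
Before the diff proper: the BAR_* names used throughout come from the matching libgomp/config/linux/bar.h change in the same patch. The sketch below reflects the layout that header is understood to define at this revision (treat the exact values as an assumption from context): the low three bits of bar->generation are flags, and the generation counter itself advances in steps of BAR_INCR.

/* Sketch of the generation-field layout assumed by this patch
   (mirroring the companion bar.h change).  BAR_WAS_LAST and
   BAR_TASK_PENDING share bit 0: the former only appears in the
   state value returned by gomp_barrier_wait_start, the latter
   only in bar->generation itself.  */
#define BAR_TASK_PENDING      1  /* tasks are queued on this barrier */
#define BAR_WAS_LAST          1  /* this thread was the last to arrive */
#define BAR_WAITING_FOR_TASK  2  /* a thread is blocked in taskwait */
#define BAR_CANCELLED         4  /* the barrier region was cancelled */
#define BAR_INCR              8  /* step size of the generation counter */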
diff --git a/libgomp/config/linux/bar.c b/libgomp/config/linux/bar.c
index 35baa88..6b591e5 100644
--- a/libgomp/config/linux/bar.c
+++ b/libgomp/config/linux/bar.c
@@ -33,11 +33,11 @@
void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
- if (__builtin_expect ((state & 1) != 0, 0))
+ if (__builtin_expect (state & BAR_WAS_LAST, 0))
{
/* Next time we'll be awaiting TOTAL threads again. */
bar->awaited = bar->total;
- __atomic_store_n (&bar->generation, bar->generation + 4,
+ __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
MEMMODEL_RELEASE);
futex_wake ((int *) &bar->generation, INT_MAX);
}
@@ -66,7 +66,7 @@ void
gomp_barrier_wait_last (gomp_barrier_t *bar)
{
gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
- if (state & 1)
+ if (state & BAR_WAS_LAST)
gomp_barrier_wait_end (bar, state);
}
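
Both hunks above consume a state produced by gomp_barrier_wait_start in bar.h. A simplified sketch of that helper, assuming it matches this revision of the header, shows where BAR_WAS_LAST comes from:

static inline gomp_barrier_state_t
gomp_barrier_wait_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
  /* Keep the counter bits plus BAR_CANCELLED; drop the task flags.  */
  ret &= -BAR_INCR | BAR_CANCELLED;
  /* The last thread to decrement AWAITED learns it arrived last.  The
     acq-rel ordering also provides the implicit flush the OpenMP
     barrier requires.  */
  if (__atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}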
@@ -81,40 +81,43 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
unsigned int generation, gen;
- if (__builtin_expect ((state & 1) != 0, 0))
+ if (__builtin_expect (state & BAR_WAS_LAST, 0))
{
/* Next time we'll be awaiting TOTAL threads again. */
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
bar->awaited = bar->total;
+ team->work_share_cancelled = 0;
if (__builtin_expect (team->task_count, 0))
{
gomp_barrier_handle_tasks (state);
- state &= ~1;
+ state &= ~BAR_WAS_LAST;
}
else
{
- __atomic_store_n (&bar->generation, state + 3, MEMMODEL_RELEASE);
+ state &= ~BAR_CANCELLED;
+ state += BAR_INCR - BAR_WAS_LAST;
+ __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
futex_wake ((int *) &bar->generation, INT_MAX);
return;
}
}
generation = state;
+ state &= ~BAR_CANCELLED;
do
{
do_wait ((int *) &bar->generation, generation);
gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
- if (__builtin_expect (gen & 1, 0))
+ if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
{
gomp_barrier_handle_tasks (state);
gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
}
- if ((gen & 2) != 0)
- generation |= 2;
+ generation |= gen & BAR_WAITING_FOR_TASK;
}
- while (gen != state + 4);
+ while (gen != state + BAR_INCR);
}
void
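
The wait loop in the hunk above spins until the generation counter has advanced by exactly one BAR_INCR, servicing queued tasks whenever BAR_TASK_PENDING shows up and folding BAR_WAITING_FOR_TASK into its futex compare value so it does not sleep on a stale word. do_wait itself is the spin-then-sleep primitive from config/linux/wait.h; roughly:

/* Sketch of the wait.h helper this file relies on.  do_spin busy-waits
   for a bounded number of iterations (tunable via GOMP_SPINCOUNT) and
   returns nonzero if *ADDR still equals VAL afterwards.  */
static inline void
do_wait (int *addr, int val)
{
  if (do_spin (addr, val))
    futex_wait (addr, val);
}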
@@ -122,3 +125,86 @@ gomp_team_barrier_wait (gomp_barrier_t *bar)
{
gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}
+
+void
+gomp_team_barrier_wait_final (gomp_barrier_t *bar)
+{
+ gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
+ if (__builtin_expect (state & BAR_WAS_LAST, 0))
+ bar->awaited_final = bar->total;
+ gomp_team_barrier_wait_end (bar, state);
+}
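
The _final variant counts arrivals in a separate awaited_final field, which the last arriver resets above. gomp_barrier_wait_final_start is presumed to mirror gomp_barrier_wait_start on that field; a sketch under that assumption:

/* Assumed shape of the bar.h helper; identical to
   gomp_barrier_wait_start except for the counter it decrements.  */
static inline gomp_barrier_state_t
gomp_barrier_wait_final_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
  ret &= -BAR_INCR | BAR_CANCELLED;
  if (__atomic_add_fetch (&bar->awaited_final, -1, MEMMODEL_ACQ_REL) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}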
+
+bool
+gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
+ gomp_barrier_state_t state)
+{
+ unsigned int generation, gen;
+
+ if (__builtin_expect (state & BAR_WAS_LAST, 0))
+ {
+ /* Next time we'll be awaiting TOTAL threads again. */
+ /* BAR_CANCELLED should never be set in state here, because
+ cancellation means that at least one of the threads has been
+ cancelled, and thus on a cancellable barrier we should never
+ see all threads arrive. */
+ struct gomp_thread *thr = gomp_thread ();
+ struct gomp_team *team = thr->ts.team;
+
+ bar->awaited = bar->total;
+ team->work_share_cancelled = 0;
+ if (__builtin_expect (team->task_count, 0))
+ {
+ gomp_barrier_handle_tasks (state);
+ state &= ~BAR_WAS_LAST;
+ }
+ else
+ {
+ state += BAR_INCR - BAR_WAS_LAST;
+ __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
+ futex_wake ((int *) &bar->generation, INT_MAX);
+ return false;
+ }
+ }
+
+ if (__builtin_expect (state & BAR_CANCELLED, 0))
+ return true;
+
+ generation = state;
+ do
+ {
+ do_wait ((int *) &bar->generation, generation);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ if (__builtin_expect (gen & BAR_CANCELLED, 0))
+ return true;
+ if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
+ {
+ gomp_barrier_handle_tasks (state);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ }
+ generation |= gen & BAR_WAITING_FOR_TASK;
+ }
+ while (gen != state + BAR_INCR);
+
+ return false;
+}
+
+bool
+gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
+{
+ return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
+}
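
The boolean result (true when the barrier was cancelled rather than passed) propagates out to the user-visible entry point added elsewhere in this patch series; in the target-independent barrier.c it is wired up roughly like this:

/* Sketch of the generic wrapper (barrier.c).  */
bool
GOMP_barrier_cancel (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  return gomp_team_barrier_wait_cancel (&team->barrier);
}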
+
+void
+gomp_team_barrier_cancel (struct gomp_team *team)
+{
+ gomp_mutex_lock (&team->task_lock);
+ if (team->barrier.generation & BAR_CANCELLED)
+ {
+ gomp_mutex_unlock (&team->task_lock);
+ return;
+ }
+ team->barrier.generation |= BAR_CANCELLED;
+ gomp_mutex_unlock (&team->task_lock);
+ futex_wake ((int *) &team->barrier.generation, INT_MAX);
+}
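
Taken together, cancellation is a one-way latch: gomp_team_barrier_cancel sets BAR_CANCELLED under the task lock and wakes all sleepers, and every thread parked in the cancellable wait returns true instead of waiting for the whole team to arrive. A hypothetical caller (names invented for illustration, not libgomp code) showing the intended flow:

/* Hypothetical usage sketch.  */
static void
worker_body (struct gomp_team *team, bool hit_cancel_point)
{
  if (hit_cancel_point)
    /* Some thread decides the region must be abandoned.  */
    gomp_team_barrier_cancel (team);

  /* Everyone reaches the cancellable barrier; a true return means
     the region was cancelled and this thread should skip ahead.  */
  if (gomp_team_barrier_wait_cancel (&team->barrier))
    return;  /* cancelled */

  /* ... barrier completed normally, continue the region ...  */
}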