Diffstat (limited to 'libgomp/config/posix')
-rw-r--r--  libgomp/config/posix/affinity.c |  79
-rw-r--r--  libgomp/config/posix/bar.c      | 132
-rw-r--r--  libgomp/config/posix/bar.h      |  58
3 files changed, 254 insertions(+), 15 deletions(-)
diff --git a/libgomp/config/posix/affinity.c b/libgomp/config/posix/affinity.c
index ac3d14e..e7f97ab 100644
--- a/libgomp/config/posix/affinity.c
+++ b/libgomp/config/posix/affinity.c
@@ -32,7 +32,84 @@ gomp_init_affinity (void)
}
void
-gomp_init_thread_affinity (pthread_attr_t *attr)
+gomp_init_thread_affinity (pthread_attr_t *attr, unsigned int place)
{
(void) attr;
+ (void) place;
+}
+
+void **
+gomp_affinity_alloc (unsigned long count, bool quiet)
+{
+ (void) count;
+ if (!quiet)
+ gomp_error ("Affinity not supported on this configuration");
+ return NULL;
+}
+
+void
+gomp_affinity_init_place (void *p)
+{
+ (void) p;
+}
+
+bool
+gomp_affinity_add_cpus (void *p, unsigned long num,
+ unsigned long len, long stride, bool quiet)
+{
+ (void) p;
+ (void) num;
+ (void) len;
+ (void) stride;
+ (void) quiet;
+ return false;
+}
+
+bool
+gomp_affinity_remove_cpu (void *p, unsigned long num)
+{
+ (void) p;
+ (void) num;
+ return false;
+}
+
+bool
+gomp_affinity_copy_place (void *p, void *q, long stride)
+{
+ (void) p;
+ (void) q;
+ (void) stride;
+ return false;
+}
+
+bool
+gomp_affinity_same_place (void *p, void *q)
+{
+ (void) p;
+ (void) q;
+ return false;
+}
+
+bool
+gomp_affinity_finalize_place_list (bool quiet)
+{
+ (void) quiet;
+ return false;
+}
+
+bool
+gomp_affinity_init_level (int level, unsigned long count, bool quiet)
+{
+ (void) level;
+ (void) count;
+ (void) quiet;
+ if (!quiet)
+ gomp_error ("Affinity not supported on this configuration");
+ return false;
+}
+
+void
+gomp_affinity_print_place (void *p)
+{
+ (void) p;
}
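
The affinity.c stubs above let the generic POSIX configuration link while reporting that place/affinity support is unavailable: the allocator returns NULL, every predicate returns false, and gomp_error is issued unless quiet is set. A minimal standalone sketch of the same fallback pattern, with demo_ names invented for illustration (this is not libgomp code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative analogue of the stubs: the allocator reports the missing
   feature (unless asked to be quiet) and returns NULL; the level
   initializer is a no-op that fails.  */
static void **
demo_affinity_alloc (unsigned long count, bool quiet)
{
  (void) count;
  if (!quiet)
    fprintf (stderr, "Affinity not supported on this configuration\n");
  return NULL;
}

static bool
demo_affinity_init_level (int level, unsigned long count, bool quiet)
{
  (void) level;
  (void) count;
  if (!quiet)
    fprintf (stderr, "Affinity not supported on this configuration\n");
  return false;
}

int
main (void)
{
  /* A caller probes for support and quietly falls back.  */
  if (demo_affinity_alloc (4, true) == NULL)
    puts ("no place list: running without affinity");
  if (!demo_affinity_init_level (1, 4, true))
    puts ("no affinity levels: ignoring proc-bind request");
  return 0;
}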
diff --git a/libgomp/config/posix/bar.c b/libgomp/config/posix/bar.c
index 06a3185..bdf3978 100644
--- a/libgomp/config/posix/bar.c
+++ b/libgomp/config/posix/bar.c
@@ -42,6 +42,7 @@ gomp_barrier_init (gomp_barrier_t *bar, unsigned count)
bar->total = count;
bar->arrived = 0;
bar->generation = 0;
+ bar->cancellable = false;
}
void
@@ -72,7 +73,7 @@ gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
unsigned int n;
- if (state & 1)
+ if (state & BAR_WAS_LAST)
{
n = --bar->arrived;
if (n > 0)
@@ -113,12 +114,14 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
unsigned int n;
- if (state & 1)
+ state &= ~BAR_CANCELLED;
+ if (state & BAR_WAS_LAST)
{
n = --bar->arrived;
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
+ team->work_share_cancelled = 0;
if (team->task_count)
{
gomp_barrier_handle_tasks (state);
@@ -128,7 +131,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
return;
}
- bar->generation = state + 3;
+ bar->generation = state + BAR_INCR - BAR_WAS_LAST;
if (n > 0)
{
do
@@ -141,13 +144,18 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
else
{
gomp_mutex_unlock (&bar->mutex1);
+ int gen;
do
{
gomp_sem_wait (&bar->sem1);
- if (bar->generation & 1)
- gomp_barrier_handle_tasks (state);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ if (gen & BAR_TASK_PENDING)
+ {
+ gomp_barrier_handle_tasks (state);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ }
}
- while (bar->generation != state + 4);
+ while (gen != state + BAR_INCR);
#ifdef HAVE_SYNC_BUILTINS
n = __sync_add_and_fetch (&bar->arrived, -1);
@@ -162,6 +170,81 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
}
}
+bool
+gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
+ gomp_barrier_state_t state)
+{
+ unsigned int n;
+
+ if (state & BAR_WAS_LAST)
+ {
+ bar->cancellable = false;
+ n = --bar->arrived;
+ struct gomp_thread *thr = gomp_thread ();
+ struct gomp_team *team = thr->ts.team;
+
+ team->work_share_cancelled = 0;
+ if (team->task_count)
+ {
+ gomp_barrier_handle_tasks (state);
+ if (n > 0)
+ gomp_sem_wait (&bar->sem2);
+ gomp_mutex_unlock (&bar->mutex1);
+ return false;
+ }
+
+ bar->generation = state + BAR_INCR - BAR_WAS_LAST;
+ if (n > 0)
+ {
+ do
+ gomp_sem_post (&bar->sem1);
+ while (--n != 0);
+ gomp_sem_wait (&bar->sem2);
+ }
+ gomp_mutex_unlock (&bar->mutex1);
+ }
+ else
+ {
+ if (state & BAR_CANCELLED)
+ {
+ gomp_mutex_unlock (&bar->mutex1);
+ return true;
+ }
+ bar->cancellable = true;
+ gomp_mutex_unlock (&bar->mutex1);
+ int gen;
+ do
+ {
+ gomp_sem_wait (&bar->sem1);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ if (gen & BAR_CANCELLED)
+ break;
+ if (gen & BAR_TASK_PENDING)
+ {
+ gomp_barrier_handle_tasks (state);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ if (gen & BAR_CANCELLED)
+ break;
+ }
+ }
+ while (gen != state + BAR_INCR);
+
+#ifdef HAVE_SYNC_BUILTINS
+ n = __sync_add_and_fetch (&bar->arrived, -1);
+#else
+ gomp_mutex_lock (&bar->mutex2);
+ n = --bar->arrived;
+ gomp_mutex_unlock (&bar->mutex2);
+#endif
+
+ if (n == 0)
+ gomp_sem_post (&bar->sem2);
+ if (gen & BAR_CANCELLED)
+ return true;
+ }
+ return false;
+}
+
void
gomp_team_barrier_wait (gomp_barrier_t *barrier)
{
@@ -176,3 +259,40 @@ gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
while (count-- > 0)
gomp_sem_post (&bar->sem1);
}
+
+bool
+gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
+{
+ gomp_barrier_state_t state = gomp_barrier_wait_cancel_start (bar);
+ return gomp_team_barrier_wait_cancel_end (bar, state);
+}
+
+void
+gomp_team_barrier_cancel (struct gomp_team *team)
+{
+ if (team->barrier.generation & BAR_CANCELLED)
+ return;
+ gomp_mutex_lock (&team->barrier.mutex1);
+ gomp_mutex_lock (&team->task_lock);
+ if (team->barrier.generation & BAR_CANCELLED)
+ {
+ gomp_mutex_unlock (&team->task_lock);
+ gomp_mutex_unlock (&team->barrier.mutex1);
+ return;
+ }
+ team->barrier.generation |= BAR_CANCELLED;
+ gomp_mutex_unlock (&team->task_lock);
+ if (team->barrier.cancellable)
+ {
+ int n = team->barrier.arrived;
+ if (n > 0)
+ {
+ do
+ gomp_sem_post (&team->barrier.sem1);
+ while (--n != 0);
+ gomp_sem_wait (&team->barrier.sem2);
+ }
+ team->barrier.cancellable = false;
+ }
+ gomp_mutex_unlock (&team->barrier.mutex1);
+}
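
The new cancellable entry points above reuse the central sem1/sem2 hand-off of the plain team barrier, but each waiter re-reads bar->generation after every wake-up and leaves early once BAR_CANCELLED appears, while gomp_team_barrier_cancel sets the flag under the locks and drains any threads already parked on sem1. A simplified standalone sketch of that handshake, using a mutex and condition variable instead of libgomp's semaphore pair (the demo_ names and structure are invented for illustration and are not the libgomp implementation; clearing the cancelled flag for reuse is omitted):

#include <pthread.h>
#include <stdbool.h>

/* Simplified cancellable barrier: "generation" counts completed barriers,
   "cancelled" plays the role of the BAR_CANCELLED flag.  */
typedef struct
{
  pthread_mutex_t lock;
  pthread_cond_t cond;
  unsigned total, arrived, generation;
  bool cancelled;
} demo_barrier_t;

#define DEMO_BARRIER_INIT(n) \
  { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, (n), 0, 0, false }

static bool
demo_barrier_wait_cancel (demo_barrier_t *bar)
{
  pthread_mutex_lock (&bar->lock);
  if (bar->cancelled)
    {
      /* Cancelled before we arrived: leave without touching the count.  */
      pthread_mutex_unlock (&bar->lock);
      return true;
    }
  unsigned gen = bar->generation;
  if (++bar->arrived == bar->total)
    {
      /* Last arrival: release everyone and start a new generation.  */
      bar->arrived = 0;
      bar->generation++;
      pthread_cond_broadcast (&bar->cond);
      pthread_mutex_unlock (&bar->lock);
      return false;
    }
  /* Sleep until the generation advances or a cancellation arrives.  */
  while (bar->generation == gen && !bar->cancelled)
    pthread_cond_wait (&bar->cond, &bar->lock);
  /* Only report cancellation if our barrier did not complete.  */
  bool cancelled = bar->generation == gen && bar->cancelled;
  if (cancelled)
    bar->arrived--;   /* Our arrival no longer counts.  */
  pthread_mutex_unlock (&bar->lock);
  return cancelled;
}

static void
demo_barrier_cancel (demo_barrier_t *bar)
{
  pthread_mutex_lock (&bar->lock);
  bar->cancelled = true;   /* Analogue of generation |= BAR_CANCELLED.  */
  pthread_cond_broadcast (&bar->cond);
  pthread_mutex_unlock (&bar->lock);
}

A worker thread would call demo_barrier_wait_cancel in its loop and leave the region when it returns true, which mirrors how the boolean result of gomp_team_barrier_wait_cancel tells its callers that the pending region was cancelled.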
diff --git a/libgomp/config/posix/bar.h b/libgomp/config/posix/bar.h
index 1a16ca8..9fcd4da 100644
--- a/libgomp/config/posix/bar.h
+++ b/libgomp/config/posix/bar.h
@@ -43,9 +43,20 @@ typedef struct
unsigned total;
unsigned arrived;
unsigned generation;
+ bool cancellable;
} gomp_barrier_t;
+
typedef unsigned int gomp_barrier_state_t;
+/* The generation field contains a counter in the high bits, with a few
+ low bits dedicated to flags. Note that TASK_PENDING and WAS_LAST can
+ share space because WAS_LAST is never stored back to generation. */
+#define BAR_TASK_PENDING 1
+#define BAR_WAS_LAST 1
+#define BAR_WAITING_FOR_TASK 2
+#define BAR_CANCELLED 4
+#define BAR_INCR 8
+
extern void gomp_barrier_init (gomp_barrier_t *, unsigned);
extern void gomp_barrier_reinit (gomp_barrier_t *, unsigned);
extern void gomp_barrier_destroy (gomp_barrier_t *);
@@ -55,22 +66,47 @@ extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t);
extern void gomp_team_barrier_wait (gomp_barrier_t *);
extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
gomp_barrier_state_t);
+extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
+extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
+ gomp_barrier_state_t);
extern void gomp_team_barrier_wake (gomp_barrier_t *, int);
+struct gomp_team;
+extern void gomp_team_barrier_cancel (struct gomp_team *);
static inline gomp_barrier_state_t
gomp_barrier_wait_start (gomp_barrier_t *bar)
{
unsigned int ret;
gomp_mutex_lock (&bar->mutex1);
- ret = bar->generation & ~3;
- ret += ++bar->arrived == bar->total;
+ ret = bar->generation & (-BAR_INCR | BAR_CANCELLED);
+ if (++bar->arrived == bar->total)
+ ret |= BAR_WAS_LAST;
+ return ret;
+}
+
+static inline gomp_barrier_state_t
+gomp_barrier_wait_cancel_start (gomp_barrier_t *bar)
+{
+ unsigned int ret;
+ gomp_mutex_lock (&bar->mutex1);
+ ret = bar->generation & (-BAR_INCR | BAR_CANCELLED);
+ if (ret & BAR_CANCELLED)
+ return ret;
+ if (++bar->arrived == bar->total)
+ ret |= BAR_WAS_LAST;
return ret;
}
+static inline void
+gomp_team_barrier_wait_final (gomp_barrier_t *bar)
+{
+ gomp_team_barrier_wait (bar);
+}
+
static inline bool
gomp_barrier_last_thread (gomp_barrier_state_t state)
{
- return state & 1;
+ return state & BAR_WAS_LAST;
}
static inline void
@@ -85,31 +121,37 @@ gomp_barrier_wait_last (gomp_barrier_t *bar)
static inline void
gomp_team_barrier_set_task_pending (gomp_barrier_t *bar)
{
- bar->generation |= 1;
+ bar->generation |= BAR_TASK_PENDING;
}
static inline void
gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar)
{
- bar->generation &= ~1;
+ bar->generation &= ~BAR_TASK_PENDING;
}
static inline void
gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar)
{
- bar->generation |= 2;
+ bar->generation |= BAR_WAITING_FOR_TASK;
}
static inline bool
gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
{
- return (bar->generation & 2) != 0;
+ return (bar->generation & BAR_WAITING_FOR_TASK) != 0;
+}
+
+static inline bool
+gomp_team_barrier_cancelled (gomp_barrier_t *bar)
+{
+ return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
}
static inline void
gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
- bar->generation = (state & ~3) + 4;
+ bar->generation = (state & -BAR_INCR) + BAR_INCR;
}
#endif /* GOMP_BARRIER_H */
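
The BAR_* constants added above pack a counter into the bits at and above BAR_INCR and keep the low bits for flags; BAR_TASK_PENDING and BAR_WAS_LAST can share bit 0 because BAR_WAS_LAST only ever lives in the state snapshot returned by gomp_barrier_wait_start, never in bar->generation itself. A short self-contained check of that layout, mirroring the masking done in gomp_barrier_wait_start and gomp_team_barrier_done (constant values copied from the header; the program is illustrative only):

#include <assert.h>
#include <stdio.h>

/* Flag/counter layout copied from the header above.  */
#define BAR_TASK_PENDING      1   /* only ever set in bar->generation */
#define BAR_WAS_LAST          1   /* only ever set in the state snapshot */
#define BAR_WAITING_FOR_TASK  2
#define BAR_CANCELLED         4
#define BAR_INCR              8   /* counter advances in steps of 8 */

int
main (void)
{
  unsigned int generation = 3 * BAR_INCR;          /* counter = 3, no flags */

  /* gomp_barrier_wait_start: keep the counter and BAR_CANCELLED, drop the
     task-related flags, then mark the last arrival in the snapshot only.  */
  unsigned int state = (generation | BAR_TASK_PENDING)
                       & (-BAR_INCR | BAR_CANCELLED);
  assert (state == 3 * BAR_INCR);
  state |= BAR_WAS_LAST;

  /* gomp_team_barrier_done: strip all flags and advance the counter.  */
  unsigned int next = (state & -BAR_INCR) + BAR_INCR;
  assert (next == 4 * BAR_INCR);

  printf ("generation %u -> state %u -> next generation %u\n",
          generation, state, next);
  return 0;
}

Both assertions hold: masking with -BAR_INCR clears the flag bits while leaving the counter intact, so the generation always advances in whole steps of BAR_INCR.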