Diffstat (limited to 'libgomp/config')
-rw-r--r--  libgomp/config/linux/affinity.c   351
-rw-r--r--  libgomp/config/linux/bar.c        106
-rw-r--r--  libgomp/config/linux/bar.h         62
-rw-r--r--  libgomp/config/linux/proc.c        96
-rw-r--r--  libgomp/config/linux/proc.h         5
-rw-r--r--  libgomp/config/posix/affinity.c    79
-rw-r--r--  libgomp/config/posix/bar.c        132
-rw-r--r--  libgomp/config/posix/bar.h         58

8 files changed, 778 insertions(+), 111 deletions(-)
diff --git a/libgomp/config/linux/affinity.c b/libgomp/config/linux/affinity.c
index dc6c7e5..789cdce 100644
--- a/libgomp/config/linux/affinity.c
+++ b/libgomp/config/linux/affinity.c
@@ -29,90 +29,327 @@
#endif
#include "libgomp.h"
#include "proc.h"
+#include <errno.h>
#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
#include <unistd.h>
#ifdef HAVE_PTHREAD_AFFINITY_NP
-static unsigned int affinity_counter;
+#ifndef CPU_ALLOC_SIZE
+#define CPU_ISSET_S(idx, size, set) CPU_ISSET(idx, set)
+#define CPU_ZERO_S(size, set) CPU_ZERO(set)
+#define CPU_SET_S(idx, size, set) CPU_SET(idx, set)
+#define CPU_CLR_S(idx, size, set) CPU_CLR(idx, set)
+#endif
void
gomp_init_affinity (void)
{
- cpu_set_t cpuset, cpusetnew;
- size_t idx, widx;
- unsigned long cpus = 0;
+ if (gomp_places_list == NULL)
+ {
+ if (!gomp_affinity_init_level (1, ULONG_MAX, true))
+ return;
+ }
+
+ struct gomp_thread *thr = gomp_thread ();
+ pthread_setaffinity_np (pthread_self (), gomp_cpuset_size,
+ (cpu_set_t *) gomp_places_list[0]);
+ thr->place = 1;
+ thr->ts.place_partition_off = 0;
+ thr->ts.place_partition_len = gomp_places_list_len;
+}
+
+void
+gomp_init_thread_affinity (pthread_attr_t *attr, unsigned int place)
+{
+ pthread_attr_setaffinity_np (attr, gomp_cpuset_size,
+ (cpu_set_t *) gomp_places_list[place]);
+}
+
+void **
+gomp_affinity_alloc (unsigned long count, bool quiet)
+{
+ unsigned long i;
+ void **ret;
+ char *p;
+
+ if (gomp_cpusetp == NULL)
+ {
+ if (!quiet)
+ gomp_error ("Could not get CPU affinity set");
+ return NULL;
+ }
- if (pthread_getaffinity_np (pthread_self (), sizeof (cpuset), &cpuset))
+ ret = malloc (count * sizeof (void *) + count * gomp_cpuset_size);
+ if (ret == NULL)
{
- gomp_error ("could not get CPU affinity set");
- free (gomp_cpu_affinity);
- gomp_cpu_affinity = NULL;
- gomp_cpu_affinity_len = 0;
- return;
+ if (!quiet)
+ gomp_error ("Out of memory trying to allocate places list");
+ return NULL;
}
- CPU_ZERO (&cpusetnew);
- if (gomp_cpu_affinity_len == 0)
+ p = (char *) (ret + count);
+ for (i = 0; i < count; i++, p += gomp_cpuset_size)
+ ret[i] = p;
+ return ret;
+}
+
+void
+gomp_affinity_init_place (void *p)
+{
+ cpu_set_t *cpusetp = (cpu_set_t *) p;
+ CPU_ZERO_S (gomp_cpuset_size, cpusetp);
+}
+
+bool
+gomp_affinity_add_cpus (void *p, unsigned long num,
+ unsigned long len, long stride, bool quiet)
+{
+ cpu_set_t *cpusetp = (cpu_set_t *) p;
+ unsigned long max = 8 * gomp_cpuset_size;
+ for (;;)
{
- unsigned long count = gomp_cpuset_popcount (&cpuset);
- if (count >= 65536)
- count = 65536;
- gomp_cpu_affinity = malloc (count * sizeof (unsigned short));
- if (gomp_cpu_affinity == NULL)
+ if (num >= max)
+ {
+ if (!quiet)
+ gomp_error ("Logical CPU number %lu out of range", num);
+ return false;
+ }
+ CPU_SET_S (num, gomp_cpuset_size, cpusetp);
+ if (--len == 0)
+ return true;
+ if ((stride < 0 && num + stride > num)
+ || (stride > 0 && num + stride < num))
{
- gomp_error ("not enough memory to store CPU affinity list");
- return;
+ if (!quiet)
+ gomp_error ("Logical CPU number %lu+%ld out of range",
+ num, stride);
+ return false;
}
- for (widx = idx = 0; widx < count && idx < 65536; idx++)
- if (CPU_ISSET (idx, &cpuset))
+ num += stride;
+ }
+}
+
+bool
+gomp_affinity_remove_cpu (void *p, unsigned long num)
+{
+ cpu_set_t *cpusetp = (cpu_set_t *) p;
+ if (num >= 8 * gomp_cpuset_size)
+ {
+ gomp_error ("Logical CPU number %lu out of range", num);
+ return false;
+ }
+ if (!CPU_ISSET_S (num, gomp_cpuset_size, cpusetp))
+ {
+ gomp_error ("Logical CPU %lu to be removed is not in the set", num);
+ return false;
+ }
+ CPU_CLR_S (num, gomp_cpuset_size, cpusetp);
+ return true;
+}
+
+bool
+gomp_affinity_copy_place (void *p, void *q, long stride)
+{
+ unsigned long i, max = 8 * gomp_cpuset_size;
+ cpu_set_t *destp = (cpu_set_t *) p;
+ cpu_set_t *srcp = (cpu_set_t *) q;
+
+ CPU_ZERO_S (gomp_cpuset_size, destp);
+ for (i = 0; i < max; i++)
+ if (CPU_ISSET_S (i, gomp_cpuset_size, srcp))
+ {
+ if ((stride < 0 && i + stride > i)
+ || (stride > 0 && (i + stride < i || i + stride >= max)))
+ {
+ gomp_error ("Logical CPU number %lu+%ld out of range", i, stride);
+ return false;
+ }
+ CPU_SET_S (i + stride, gomp_cpuset_size, destp);
+ }
+ return true;
+}
+
+bool
+gomp_affinity_same_place (void *p, void *q)
+{
+#ifdef CPU_EQUAL_S
+ return CPU_EQUAL_S (gomp_cpuset_size, (cpu_set_t *) p, (cpu_set_t *) q);
+#else
+ return memcmp (p, q, gomp_cpuset_size) == 0;
+#endif
+}
+
+bool
+gomp_affinity_finalize_place_list (bool quiet)
+{
+ unsigned long i, j;
+
+ for (i = 0, j = 0; i < gomp_places_list_len; i++)
+ {
+ cpu_set_t *cpusetp = (cpu_set_t *) gomp_places_list[i];
+ bool nonempty = false;
+#ifdef CPU_AND_S
+ CPU_AND_S (gomp_cpuset_size, cpusetp, cpusetp, gomp_cpusetp);
+ nonempty = gomp_cpuset_popcount (gomp_cpuset_size, cpusetp) != 0;
+#else
+ unsigned long k, max = gomp_cpuset_size / sizeof (cpusetp->__bits[0]);
+ for (k = 0; k < max; k++)
+ if ((cpusetp->__bits[k] &= gomp_cpusetp->__bits[k]) != 0)
+ nonempty = true;
+#endif
+ if (nonempty)
+ gomp_places_list[j++] = gomp_places_list[i];
+ }
+
+ if (j == 0)
+ {
+ if (!quiet)
+ gomp_error ("None of the places contain usable logical CPUs");
+ return false;
+ }
+ else if (j < gomp_places_list_len)
+ {
+ if (!quiet)
+ gomp_error ("Number of places reduced from %ld to %ld because some "
+ "places didn't contain any usable logical CPUs",
+ gomp_places_list_len, j);
+ gomp_places_list_len = j;
+ }
+ return true;
+}
+
+bool
+gomp_affinity_init_level (int level, unsigned long count, bool quiet)
+{
+ unsigned long i, max = 8 * gomp_cpuset_size;
+
+ if (gomp_cpusetp)
+ {
+ unsigned long maxcount
+ = gomp_cpuset_popcount (gomp_cpuset_size, gomp_cpusetp);
+ if (count > maxcount)
+ count = maxcount;
+ }
+ gomp_places_list = gomp_affinity_alloc (count, quiet);
+ gomp_places_list_len = 0;
+ if (gomp_places_list == NULL)
+ return false;
+ /* SMT (threads). */
+ if (level == 1)
+ {
+ for (i = 0; i < max && gomp_places_list_len < count; i++)
+ if (CPU_ISSET_S (i, gomp_cpuset_size, gomp_cpusetp))
{
- cpus++;
- gomp_cpu_affinity[widx++] = idx;
+ gomp_affinity_init_place (gomp_places_list[gomp_places_list_len]);
+ gomp_affinity_add_cpus (gomp_places_list[gomp_places_list_len],
+ i, 1, 0, true);
+ ++gomp_places_list_len;
}
+ return true;
}
else
- for (widx = idx = 0; idx < gomp_cpu_affinity_len; idx++)
- if (gomp_cpu_affinity[idx] < CPU_SETSIZE
- && CPU_ISSET (gomp_cpu_affinity[idx], &cpuset))
+ {
+ char name[sizeof ("/sys/devices/system/cpu/cpu/topology/"
+ "thread_siblings_list") + 3 * sizeof (unsigned long)];
+ size_t prefix_len = sizeof ("/sys/devices/system/cpu/cpu") - 1;
+ cpu_set_t *copy = gomp_alloca (gomp_cpuset_size);
+ FILE *f;
+ char *line = NULL;
+ size_t linelen = 0;
+
+ memcpy (name, "/sys/devices/system/cpu/cpu", prefix_len);
+ memcpy (copy, gomp_cpusetp, gomp_cpuset_size);
+ for (i = 0; i < max && gomp_places_list_len < count; i++)
+ if (CPU_ISSET_S (i, gomp_cpuset_size, copy))
+ {
+ sprintf (name + prefix_len, "%lu/topology/%s_siblings_list",
+ i, level == 2 ? "thread" : "core");
+ f = fopen (name, "r");
+ if (f != NULL)
+ {
+ if (getline (&line, &linelen, f) > 0)
+ {
+ char *p = line;
+ bool seen_i = false;
+ void *pl = gomp_places_list[gomp_places_list_len];
+ gomp_affinity_init_place (pl);
+ while (*p && *p != '\n')
+ {
+ unsigned long first, last;
+ errno = 0;
+ first = strtoul (p, &p, 10);
+ if (errno)
+ break;
+ last = first;
+ if (*p == '-')
+ {
+ errno = 0;
+ last = strtoul (p + 1, &p, 10);
+ if (errno || last < first)
+ break;
+ }
+ for (; first <= last; first++)
+ if (CPU_ISSET_S (first, gomp_cpuset_size, copy)
+ && gomp_affinity_add_cpus (pl, first, 1, 0,
+ true))
+ {
+ CPU_CLR_S (first, gomp_cpuset_size, copy);
+ if (first == i)
+ seen_i = true;
+ }
+ if (*p == ',')
+ ++p;
+ }
+ if (seen_i)
+ gomp_places_list_len++;
+ }
+ fclose (f);
+ }
+ }
+ if (gomp_places_list_len == 0)
{
- if (! CPU_ISSET (gomp_cpu_affinity[idx], &cpusetnew))
- {
- cpus++;
- CPU_SET (gomp_cpu_affinity[idx], &cpusetnew);
- }
- gomp_cpu_affinity[widx++] = gomp_cpu_affinity[idx];
+ if (!quiet)
+ gomp_error ("Error reading %s topology",
+ level == 2 ? "core" : "socket");
+ free (gomp_places_list);
+ gomp_places_list = NULL;
+ return false;
}
-
- if (widx == 0)
- {
- gomp_error ("no CPUs left for affinity setting");
- free (gomp_cpu_affinity);
- gomp_cpu_affinity = NULL;
- gomp_cpu_affinity_len = 0;
- return;
+ return true;
}
-
- gomp_cpu_affinity_len = widx;
- if (cpus < gomp_available_cpus)
- gomp_available_cpus = cpus;
- CPU_ZERO (&cpuset);
- CPU_SET (gomp_cpu_affinity[0], &cpuset);
- pthread_setaffinity_np (pthread_self (), sizeof (cpuset), &cpuset);
- affinity_counter = 1;
+ return false;
}
void
-gomp_init_thread_affinity (pthread_attr_t *attr)
+gomp_affinity_print_place (void *p)
{
- unsigned int cpu;
- cpu_set_t cpuset;
-
- cpu = __atomic_fetch_add (&affinity_counter, 1, MEMMODEL_RELAXED);
- cpu %= gomp_cpu_affinity_len;
- CPU_ZERO (&cpuset);
- CPU_SET (gomp_cpu_affinity[cpu], &cpuset);
- pthread_attr_setaffinity_np (attr, sizeof (cpu_set_t), &cpuset);
+ unsigned long i, max = 8 * gomp_cpuset_size, len;
+ cpu_set_t *cpusetp = (cpu_set_t *) p;
+ bool notfirst = false;
+
+ for (i = 0, len = 0; i < max; i++)
+ if (CPU_ISSET_S (i, gomp_cpuset_size, cpusetp))
+ {
+ if (len == 0)
+ {
+ if (notfirst)
+ fputc (',', stderr);
+ notfirst = true;
+ fprintf (stderr, "%lu", i);
+ }
+ ++len;
+ }
+ else
+ {
+ if (len > 1)
+ fprintf (stderr, ":%lu", len);
+ len = 0;
+ }
+ if (len > 1)
+ fprintf (stderr, ":%lu", len);
}
#else
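
The rewritten linux/affinity.c above stops hard-coding sizeof (cpu_set_t) and instead operates on masks of gomp_cpuset_size bytes through the CPU_*_S macros, falling back to the fixed-size macros where CPU_ALLOC_SIZE is unavailable. A minimal standalone sketch of that glibc (2.7+) dynamically sized cpu_set_t interface, sized the same way the patch sizes gomp_cpuset_size; the names below are illustrative, not libgomp's:

/* Sketch only: query the calling thread's affinity mask using a
   runtime-sized cpu_set_t, then count the usable CPUs.  */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  long nconf = sysconf (_SC_NPROCESSORS_CONF);
  if (nconf < 0)
    return 1;
  size_t setsize = CPU_ALLOC_SIZE (nconf);   /* bytes needed for nconf CPUs */
  cpu_set_t *set = CPU_ALLOC (nconf);
  if (set == NULL)
    return 1;
  CPU_ZERO_S (setsize, set);
  if (pthread_getaffinity_np (pthread_self (), setsize, set) == 0)
    printf ("usable CPUs: %d\n", CPU_COUNT_S (setsize, set));
  CPU_FREE (set);
  return 0;
}
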
diff --git a/libgomp/config/linux/bar.c b/libgomp/config/linux/bar.c
index 35baa88..6b591e5 100644
--- a/libgomp/config/linux/bar.c
+++ b/libgomp/config/linux/bar.c
@@ -33,11 +33,11 @@
void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
- if (__builtin_expect ((state & 1) != 0, 0))
+ if (__builtin_expect (state & BAR_WAS_LAST, 0))
{
/* Next time we'll be awaiting TOTAL threads again. */
bar->awaited = bar->total;
- __atomic_store_n (&bar->generation, bar->generation + 4,
+ __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
MEMMODEL_RELEASE);
futex_wake ((int *) &bar->generation, INT_MAX);
}
@@ -66,7 +66,7 @@ void
gomp_barrier_wait_last (gomp_barrier_t *bar)
{
gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
- if (state & 1)
+ if (state & BAR_WAS_LAST)
gomp_barrier_wait_end (bar, state);
}
@@ -81,40 +81,43 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
unsigned int generation, gen;
- if (__builtin_expect ((state & 1) != 0, 0))
+ if (__builtin_expect (state & BAR_WAS_LAST, 0))
{
/* Next time we'll be awaiting TOTAL threads again. */
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
bar->awaited = bar->total;
+ team->work_share_cancelled = 0;
if (__builtin_expect (team->task_count, 0))
{
gomp_barrier_handle_tasks (state);
- state &= ~1;
+ state &= ~BAR_WAS_LAST;
}
else
{
- __atomic_store_n (&bar->generation, state + 3, MEMMODEL_RELEASE);
+ state &= ~BAR_CANCELLED;
+ state += BAR_INCR - BAR_WAS_LAST;
+ __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
futex_wake ((int *) &bar->generation, INT_MAX);
return;
}
}
generation = state;
+ state &= ~BAR_CANCELLED;
do
{
do_wait ((int *) &bar->generation, generation);
gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
- if (__builtin_expect (gen & 1, 0))
+ if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
{
gomp_barrier_handle_tasks (state);
gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
}
- if ((gen & 2) != 0)
- generation |= 2;
+ generation |= gen & BAR_WAITING_FOR_TASK;
}
- while (gen != state + 4);
+ while (gen != state + BAR_INCR);
}
void
@@ -122,3 +125,86 @@ gomp_team_barrier_wait (gomp_barrier_t *bar)
{
gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}
+
+void
+gomp_team_barrier_wait_final (gomp_barrier_t *bar)
+{
+ gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
+ if (__builtin_expect (state & BAR_WAS_LAST, 0))
+ bar->awaited_final = bar->total;
+ gomp_team_barrier_wait_end (bar, state);
+}
+
+bool
+gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
+ gomp_barrier_state_t state)
+{
+ unsigned int generation, gen;
+
+ if (__builtin_expect (state & BAR_WAS_LAST, 0))
+ {
+ /* Next time we'll be awaiting TOTAL threads again. */
+ /* BAR_CANCELLED should never be set in state here, because
+ cancellation means that at least one of the threads has been
+ cancelled, thus on a cancellable barrier we should never see
+ all threads to arrive. */
+ struct gomp_thread *thr = gomp_thread ();
+ struct gomp_team *team = thr->ts.team;
+
+ bar->awaited = bar->total;
+ team->work_share_cancelled = 0;
+ if (__builtin_expect (team->task_count, 0))
+ {
+ gomp_barrier_handle_tasks (state);
+ state &= ~BAR_WAS_LAST;
+ }
+ else
+ {
+ state += BAR_INCR - BAR_WAS_LAST;
+ __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
+ futex_wake ((int *) &bar->generation, INT_MAX);
+ return false;
+ }
+ }
+
+ if (__builtin_expect (state & BAR_CANCELLED, 0))
+ return true;
+
+ generation = state;
+ do
+ {
+ do_wait ((int *) &bar->generation, generation);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ if (__builtin_expect (gen & BAR_CANCELLED, 0))
+ return true;
+ if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
+ {
+ gomp_barrier_handle_tasks (state);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ }
+ generation |= gen & BAR_WAITING_FOR_TASK;
+ }
+ while (gen != state + BAR_INCR);
+
+ return false;
+}
+
+bool
+gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
+{
+ return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
+}
+
+void
+gomp_team_barrier_cancel (struct gomp_team *team)
+{
+ gomp_mutex_lock (&team->task_lock);
+ if (team->barrier.generation & BAR_CANCELLED)
+ {
+ gomp_mutex_unlock (&team->task_lock);
+ return;
+ }
+ team->barrier.generation |= BAR_CANCELLED;
+ gomp_mutex_unlock (&team->task_lock);
+ futex_wake ((int *) &team->barrier.generation, INT_MAX);
+}
diff --git a/libgomp/config/linux/bar.h b/libgomp/config/linux/bar.h
index 69b9706..914c867 100644
--- a/libgomp/config/linux/bar.h
+++ b/libgomp/config/linux/bar.h
@@ -38,13 +38,25 @@ typedef struct
unsigned total __attribute__((aligned (64)));
unsigned generation;
unsigned awaited __attribute__((aligned (64)));
+ unsigned awaited_final;
} gomp_barrier_t;
+
typedef unsigned int gomp_barrier_state_t;
+/* The generation field contains a counter in the high bits, with a few
+ low bits dedicated to flags. Note that TASK_PENDING and WAS_LAST can
+ share space because WAS_LAST is never stored back to generation. */
+#define BAR_TASK_PENDING 1
+#define BAR_WAS_LAST 1
+#define BAR_WAITING_FOR_TASK 2
+#define BAR_CANCELLED 4
+#define BAR_INCR 8
+
static inline void gomp_barrier_init (gomp_barrier_t *bar, unsigned count)
{
bar->total = count;
bar->awaited = count;
+ bar->awaited_final = count;
bar->generation = 0;
}
@@ -62,27 +74,55 @@ extern void gomp_barrier_wait (gomp_barrier_t *);
extern void gomp_barrier_wait_last (gomp_barrier_t *);
extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t);
extern void gomp_team_barrier_wait (gomp_barrier_t *);
+extern void gomp_team_barrier_wait_final (gomp_barrier_t *);
extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
gomp_barrier_state_t);
+extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
+extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
+ gomp_barrier_state_t);
extern void gomp_team_barrier_wake (gomp_barrier_t *, int);
+struct gomp_team;
+extern void gomp_team_barrier_cancel (struct gomp_team *);
static inline gomp_barrier_state_t
gomp_barrier_wait_start (gomp_barrier_t *bar)
{
- unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) & ~3;
+ unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ ret &= -BAR_INCR | BAR_CANCELLED;
/* A memory barrier is needed before exiting from the various forms
of gomp_barrier_wait, to satisfy OpenMP API version 3.1 section
2.8.6 flush Construct, which says there is an implicit flush during
a barrier region. This is a convenient place to add the barrier,
so we use MEMMODEL_ACQ_REL here rather than MEMMODEL_ACQUIRE. */
- ret += __atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0;
+ if (__atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0)
+ ret |= BAR_WAS_LAST;
+ return ret;
+}
+
+static inline gomp_barrier_state_t
+gomp_barrier_wait_cancel_start (gomp_barrier_t *bar)
+{
+ return gomp_barrier_wait_start (bar);
+}
+
+/* This is like gomp_barrier_wait_start, except it decrements
+ bar->awaited_final rather than bar->awaited and should be used
+ for the gomp_team_end barrier only. */
+static inline gomp_barrier_state_t
+gomp_barrier_wait_final_start (gomp_barrier_t *bar)
+{
+ unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ ret &= -BAR_INCR | BAR_CANCELLED;
+ /* See above gomp_barrier_wait_start comment. */
+ if (__atomic_add_fetch (&bar->awaited_final, -1, MEMMODEL_ACQ_REL) == 0)
+ ret |= BAR_WAS_LAST;
return ret;
}
static inline bool
gomp_barrier_last_thread (gomp_barrier_state_t state)
{
- return state & 1;
+ return state & BAR_WAS_LAST;
}
/* All the inlines below must be called with team->task_lock
@@ -91,31 +131,37 @@ gomp_barrier_last_thread (gomp_barrier_state_t state)
static inline void
gomp_team_barrier_set_task_pending (gomp_barrier_t *bar)
{
- bar->generation |= 1;
+ bar->generation |= BAR_TASK_PENDING;
}
static inline void
gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar)
{
- bar->generation &= ~1;
+ bar->generation &= ~BAR_TASK_PENDING;
}
static inline void
gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar)
{
- bar->generation |= 2;
+ bar->generation |= BAR_WAITING_FOR_TASK;
}
static inline bool
gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
{
- return (bar->generation & 2) != 0;
+ return (bar->generation & BAR_WAITING_FOR_TASK) != 0;
+}
+
+static inline bool
+gomp_team_barrier_cancelled (gomp_barrier_t *bar)
+{
+ return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
}
static inline void
gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
- bar->generation = (state & ~3) + 4;
+ bar->generation = (state & -BAR_INCR) + BAR_INCR;
}
#endif /* GOMP_BARRIER_H */
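
The comment in bar.h above describes how the generation word is laid out: the three low bits carry the flags and everything from BAR_INCR upward is the counter, so masking with -BAR_INCR strips the flags while adding BAR_INCR advances the generation without touching them. A small self-contained sketch of that arithmetic, reusing the same flag values (illustrative only, not part of the patch):

/* With BAR_INCR == 8, -BAR_INCR is ...11111000 once converted to
   unsigned, so it keeps the counter bits and clears the flag bits.  */
#include <assert.h>

#define BAR_TASK_PENDING     1
#define BAR_WAS_LAST         1
#define BAR_WAITING_FOR_TASK 2
#define BAR_CANCELLED        4
#define BAR_INCR             8

int
main (void)
{
  unsigned int gen = 3 * BAR_INCR | BAR_CANCELLED | BAR_TASK_PENDING;

  /* Drop the flags, keep the counter; bump it by one generation,
     as gomp_team_barrier_done does.  */
  assert ((gen & -BAR_INCR) == 3 * BAR_INCR);
  assert (((gen & -BAR_INCR) + BAR_INCR) == 4 * BAR_INCR);

  /* gomp_barrier_wait_start keeps BAR_CANCELLED but nothing else.  */
  assert ((gen & (-BAR_INCR | BAR_CANCELLED))
          == (3 * BAR_INCR | BAR_CANCELLED));
  return 0;
}
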
diff --git a/libgomp/config/linux/proc.c b/libgomp/config/linux/proc.c
index cbb773e..d4ae116 100644
--- a/libgomp/config/linux/proc.c
+++ b/libgomp/config/linux/proc.c
@@ -30,6 +30,7 @@
#endif
#include "libgomp.h"
#include "proc.h"
+#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#ifdef HAVE_GETLOADAVG
@@ -39,19 +40,28 @@
#endif
#ifdef HAVE_PTHREAD_AFFINITY_NP
+unsigned long gomp_cpuset_size;
+static unsigned long gomp_get_cpuset_size;
+cpu_set_t *gomp_cpusetp;
+
unsigned long
-gomp_cpuset_popcount (cpu_set_t *cpusetp)
+gomp_cpuset_popcount (unsigned long cpusetsize, cpu_set_t *cpusetp)
{
-#ifdef CPU_COUNT
- /* glibc 2.6 and above provide a macro for this. */
- return CPU_COUNT (cpusetp);
+#ifdef CPU_COUNT_S
+ /* glibc 2.7 and above provide a macro for this. */
+ return CPU_COUNT_S (cpusetsize, cpusetp);
#else
+#ifdef CPU_COUNT
+ if (cpusetsize == sizeof (cpu_set_t))
+ /* glibc 2.6 and above provide a macro for this. */
+ return CPU_COUNT (cpusetp);
+#endif
size_t i;
unsigned long ret = 0;
- extern int check[sizeof (cpusetp->__bits[0]) == sizeof (unsigned long int)];
+ extern int check[sizeof (cpusetp->__bits[0]) == sizeof (unsigned long int)
+ ? 1 : -1];
- (void) check;
- for (i = 0; i < sizeof (*cpusetp) / sizeof (cpusetp->__bits[0]); i++)
+ for (i = 0; i < cpusetsize / sizeof (cpusetp->__bits[0]); i++)
{
unsigned long int mask = cpusetp->__bits[i];
if (mask == 0)
@@ -70,16 +80,63 @@ void
gomp_init_num_threads (void)
{
#ifdef HAVE_PTHREAD_AFFINITY_NP
- cpu_set_t cpuset;
+#if defined (_SC_NPROCESSORS_CONF) && defined (CPU_ALLOC_SIZE)
+ gomp_cpuset_size = sysconf (_SC_NPROCESSORS_CONF);
+ gomp_cpuset_size = CPU_ALLOC_SIZE (gomp_cpuset_size);
+#else
+ gomp_cpuset_size = sizeof (cpu_set_t);
+#endif
- if (pthread_getaffinity_np (pthread_self (), sizeof (cpuset), &cpuset) == 0)
+ gomp_cpusetp = (cpu_set_t *) gomp_malloc (gomp_cpuset_size);
+ do
{
- /* Count only the CPUs this process can use. */
- gomp_global_icv.nthreads_var = gomp_cpuset_popcount (&cpuset);
- if (gomp_global_icv.nthreads_var == 0)
- gomp_global_icv.nthreads_var = 1;
- return;
+ int ret = pthread_getaffinity_np (pthread_self (), gomp_cpuset_size,
+ gomp_cpusetp);
+ if (ret == 0)
+ {
+ unsigned long i;
+ /* Count only the CPUs this process can use. */
+ gomp_global_icv.nthreads_var
+ = gomp_cpuset_popcount (gomp_cpuset_size, gomp_cpusetp);
+ if (gomp_global_icv.nthreads_var == 0)
+ break;
+ gomp_get_cpuset_size = gomp_cpuset_size;
+#ifdef CPU_ALLOC_SIZE
+ for (i = gomp_cpuset_size * 8; i; i--)
+ if (CPU_ISSET_S (i - 1, gomp_cpuset_size, gomp_cpusetp))
+ break;
+ gomp_cpuset_size = CPU_ALLOC_SIZE (i);
+#endif
+ return;
+ }
+ if (ret != EINVAL)
+ break;
+#ifdef CPU_ALLOC_SIZE
+ if (gomp_cpuset_size < sizeof (cpu_set_t))
+ gomp_cpuset_size = sizeof (cpu_set_t);
+ else
+ gomp_cpuset_size = gomp_cpuset_size * 2;
+ if (gomp_cpuset_size < 8 * sizeof (cpu_set_t))
+ gomp_cpusetp
+ = (cpu_set_t *) gomp_realloc (gomp_cpusetp, gomp_cpuset_size);
+ else
+ {
+ /* Avoid gomp_fatal if too large memory allocation would be
+ requested, e.g. kernel returning EINVAL all the time. */
+ void *p = realloc (gomp_cpusetp, gomp_cpuset_size);
+ if (p == NULL)
+ break;
+ gomp_cpusetp = (cpu_set_t *) p;
+ }
+#else
+ break;
+#endif
}
+ while (1);
+ gomp_cpuset_size = 0;
+ gomp_global_icv.nthreads_var = 1;
+ free (gomp_cpusetp);
+ gomp_cpusetp = NULL;
#endif
#ifdef _SC_NPROCESSORS_ONLN
gomp_global_icv.nthreads_var = sysconf (_SC_NPROCESSORS_ONLN);
@@ -90,15 +147,14 @@ static int
get_num_procs (void)
{
#ifdef HAVE_PTHREAD_AFFINITY_NP
- cpu_set_t cpuset;
-
- if (gomp_cpu_affinity == NULL)
+ if (gomp_places_list == NULL)
{
/* Count only the CPUs this process can use. */
- if (pthread_getaffinity_np (pthread_self (), sizeof (cpuset),
- &cpuset) == 0)
+ if (gomp_cpusetp
+ && pthread_getaffinity_np (pthread_self (), gomp_get_cpuset_size,
+ gomp_cpusetp) == 0)
{
- int ret = gomp_cpuset_popcount (&cpuset);
+ int ret = gomp_cpuset_popcount (gomp_get_cpuset_size, gomp_cpusetp);
return ret != 0 ? ret : 1;
}
}
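
gomp_cpuset_popcount now takes the set size explicitly: it uses CPU_COUNT_S where glibc 2.7+ provides it, CPU_COUNT when the set happens to be the fixed sizeof (cpu_set_t) on glibc 2.6, and otherwise counts the bits by hand. A minimal sketch of that manual fallback, using GCC's __builtin_popcountl as a shortcut instead of the patch's bit-by-bit loop (illustrative only):

/* Count the set bits of an arbitrarily sized CPU mask, one
   unsigned long word at a time.  */
#include <stddef.h>

unsigned long
mask_popcount (const unsigned long *bits, size_t setsize_bytes)
{
  size_t i, nwords = setsize_bytes / sizeof (unsigned long);
  unsigned long count = 0;

  for (i = 0; i < nwords; i++)
    count += __builtin_popcountl (bits[i]);
  return count;
}
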
diff --git a/libgomp/config/linux/proc.h b/libgomp/config/linux/proc.h
index cba7f4a..bdc85db 100644
--- a/libgomp/config/linux/proc.h
+++ b/libgomp/config/linux/proc.h
@@ -28,7 +28,10 @@
#include <sched.h>
#ifdef HAVE_PTHREAD_AFFINITY_NP
-extern unsigned long gomp_cpuset_popcount (cpu_set_t *);
+extern unsigned long gomp_cpuset_size attribute_hidden;
+extern cpu_set_t *gomp_cpusetp attribute_hidden;
+extern unsigned long gomp_cpuset_popcount (unsigned long, cpu_set_t *)
+ attribute_hidden;
#endif
#endif /* GOMP_PROC_H */
diff --git a/libgomp/config/posix/affinity.c b/libgomp/config/posix/affinity.c
index ac3d14e..e7f97ab 100644
--- a/libgomp/config/posix/affinity.c
+++ b/libgomp/config/posix/affinity.c
@@ -32,7 +32,84 @@ gomp_init_affinity (void)
}
void
-gomp_init_thread_affinity (pthread_attr_t *attr)
+gomp_init_thread_affinity (pthread_attr_t *attr, unsigned int place)
{
(void) attr;
+ (void) place;
+}
+
+void **
+gomp_affinity_alloc (unsigned long count, bool quiet)
+{
+ (void) count;
+ if (!quiet)
+ gomp_error ("Affinity not supported on this configuration");
+ return NULL;
+}
+
+void
+gomp_affinity_init_place (void *p)
+{
+ (void) p;
+}
+
+bool
+gomp_affinity_add_cpus (void *p, unsigned long num,
+ unsigned long len, long stride, bool quiet)
+{
+ (void) p;
+ (void) num;
+ (void) len;
+ (void) stride;
+ (void) quiet;
+ return false;
+}
+
+bool
+gomp_affinity_remove_cpu (void *p, unsigned long num)
+{
+ (void) p;
+ (void) num;
+ return false;
+}
+
+bool
+gomp_affinity_copy_place (void *p, void *q, long stride)
+{
+ (void) p;
+ (void) q;
+ (void) stride;
+ return false;
+}
+
+bool
+gomp_affinity_same_place (void *p, void *q)
+{
+ (void) p;
+ (void) q;
+ return false;
+}
+
+bool
+gomp_affinity_finalize_place_list (bool quiet)
+{
+ (void) quiet;
+ return false;
+}
+
+bool
+gomp_affinity_init_level (int level, unsigned long count, bool quiet)
+{
+ (void) level;
+ (void) count;
+ (void) quiet;
+ if (!quiet)
+ gomp_error ("Affinity not supported on this configuration");
+ return false;
+}
+
+void
+gomp_affinity_print_place (void *p)
+{
+ (void) p;
}
diff --git a/libgomp/config/posix/bar.c b/libgomp/config/posix/bar.c
index 06a3185..bdf3978 100644
--- a/libgomp/config/posix/bar.c
+++ b/libgomp/config/posix/bar.c
@@ -42,6 +42,7 @@ gomp_barrier_init (gomp_barrier_t *bar, unsigned count)
bar->total = count;
bar->arrived = 0;
bar->generation = 0;
+ bar->cancellable = false;
}
void
@@ -72,7 +73,7 @@ gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
unsigned int n;
- if (state & 1)
+ if (state & BAR_WAS_LAST)
{
n = --bar->arrived;
if (n > 0)
@@ -113,12 +114,14 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
unsigned int n;
- if (state & 1)
+ state &= ~BAR_CANCELLED;
+ if (state & BAR_WAS_LAST)
{
n = --bar->arrived;
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
+ team->work_share_cancelled = 0;
if (team->task_count)
{
gomp_barrier_handle_tasks (state);
@@ -128,7 +131,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
return;
}
- bar->generation = state + 3;
+ bar->generation = state + BAR_INCR - BAR_WAS_LAST;
if (n > 0)
{
do
@@ -141,13 +144,18 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
else
{
gomp_mutex_unlock (&bar->mutex1);
+ int gen;
do
{
gomp_sem_wait (&bar->sem1);
- if (bar->generation & 1)
- gomp_barrier_handle_tasks (state);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ if (gen & BAR_TASK_PENDING)
+ {
+ gomp_barrier_handle_tasks (state);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ }
}
- while (bar->generation != state + 4);
+ while (gen != state + BAR_INCR);
#ifdef HAVE_SYNC_BUILTINS
n = __sync_add_and_fetch (&bar->arrived, -1);
@@ -162,6 +170,81 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
}
}
+bool
+gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
+ gomp_barrier_state_t state)
+{
+ unsigned int n;
+
+ if (state & BAR_WAS_LAST)
+ {
+ bar->cancellable = false;
+ n = --bar->arrived;
+ struct gomp_thread *thr = gomp_thread ();
+ struct gomp_team *team = thr->ts.team;
+
+ team->work_share_cancelled = 0;
+ if (team->task_count)
+ {
+ gomp_barrier_handle_tasks (state);
+ if (n > 0)
+ gomp_sem_wait (&bar->sem2);
+ gomp_mutex_unlock (&bar->mutex1);
+ return false;
+ }
+
+ bar->generation = state + BAR_INCR - BAR_WAS_LAST;
+ if (n > 0)
+ {
+ do
+ gomp_sem_post (&bar->sem1);
+ while (--n != 0);
+ gomp_sem_wait (&bar->sem2);
+ }
+ gomp_mutex_unlock (&bar->mutex1);
+ }
+ else
+ {
+ if (state & BAR_CANCELLED)
+ {
+ gomp_mutex_unlock (&bar->mutex1);
+ return true;
+ }
+ bar->cancellable = true;
+ gomp_mutex_unlock (&bar->mutex1);
+ int gen;
+ do
+ {
+ gomp_sem_wait (&bar->sem1);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ if (gen & BAR_CANCELLED)
+ break;
+ if (gen & BAR_TASK_PENDING)
+ {
+ gomp_barrier_handle_tasks (state);
+ gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+ if (gen & BAR_CANCELLED)
+ break;
+ }
+ }
+ while (gen != state + BAR_INCR);
+
+#ifdef HAVE_SYNC_BUILTINS
+ n = __sync_add_and_fetch (&bar->arrived, -1);
+#else
+ gomp_mutex_lock (&bar->mutex2);
+ n = --bar->arrived;
+ gomp_mutex_unlock (&bar->mutex2);
+#endif
+
+ if (n == 0)
+ gomp_sem_post (&bar->sem2);
+ if (gen & BAR_CANCELLED)
+ return true;
+ }
+ return false;
+}
+
void
gomp_team_barrier_wait (gomp_barrier_t *barrier)
{
@@ -176,3 +259,40 @@ gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
while (count-- > 0)
gomp_sem_post (&bar->sem1);
}
+
+bool
+gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
+{
+ gomp_barrier_state_t state = gomp_barrier_wait_cancel_start (bar);
+ return gomp_team_barrier_wait_cancel_end (bar, state);
+}
+
+void
+gomp_team_barrier_cancel (struct gomp_team *team)
+{
+ if (team->barrier.generation & BAR_CANCELLED)
+ return;
+ gomp_mutex_lock (&team->barrier.mutex1);
+ gomp_mutex_lock (&team->task_lock);
+ if (team->barrier.generation & BAR_CANCELLED)
+ {
+ gomp_mutex_unlock (&team->task_lock);
+ gomp_mutex_unlock (&team->barrier.mutex1);
+ return;
+ }
+ team->barrier.generation |= BAR_CANCELLED;
+ gomp_mutex_unlock (&team->task_lock);
+ if (team->barrier.cancellable)
+ {
+ int n = team->barrier.arrived;
+ if (n > 0)
+ {
+ do
+ gomp_sem_post (&team->barrier.sem1);
+ while (--n != 0);
+ gomp_sem_wait (&team->barrier.sem2);
+ }
+ team->barrier.cancellable = false;
+ }
+ gomp_mutex_unlock (&team->barrier.mutex1);
+}
diff --git a/libgomp/config/posix/bar.h b/libgomp/config/posix/bar.h
index 1a16ca8..9fcd4da 100644
--- a/libgomp/config/posix/bar.h
+++ b/libgomp/config/posix/bar.h
@@ -43,9 +43,20 @@ typedef struct
unsigned total;
unsigned arrived;
unsigned generation;
+ bool cancellable;
} gomp_barrier_t;
+
typedef unsigned int gomp_barrier_state_t;
+/* The generation field contains a counter in the high bits, with a few
+ low bits dedicated to flags. Note that TASK_PENDING and WAS_LAST can
+ share space because WAS_LAST is never stored back to generation. */
+#define BAR_TASK_PENDING 1
+#define BAR_WAS_LAST 1
+#define BAR_WAITING_FOR_TASK 2
+#define BAR_CANCELLED 4
+#define BAR_INCR 8
+
extern void gomp_barrier_init (gomp_barrier_t *, unsigned);
extern void gomp_barrier_reinit (gomp_barrier_t *, unsigned);
extern void gomp_barrier_destroy (gomp_barrier_t *);
@@ -55,22 +66,47 @@ extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t);
extern void gomp_team_barrier_wait (gomp_barrier_t *);
extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
gomp_barrier_state_t);
+extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
+extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
+ gomp_barrier_state_t);
extern void gomp_team_barrier_wake (gomp_barrier_t *, int);
+struct gomp_team;
+extern void gomp_team_barrier_cancel (struct gomp_team *);
static inline gomp_barrier_state_t
gomp_barrier_wait_start (gomp_barrier_t *bar)
{
unsigned int ret;
gomp_mutex_lock (&bar->mutex1);
- ret = bar->generation & ~3;
- ret += ++bar->arrived == bar->total;
+ ret = bar->generation & (-BAR_INCR | BAR_CANCELLED);
+ if (++bar->arrived == bar->total)
+ ret |= BAR_WAS_LAST;
+ return ret;
+}
+
+static inline gomp_barrier_state_t
+gomp_barrier_wait_cancel_start (gomp_barrier_t *bar)
+{
+ unsigned int ret;
+ gomp_mutex_lock (&bar->mutex1);
+ ret = bar->generation & (-BAR_INCR | BAR_CANCELLED);
+ if (ret & BAR_CANCELLED)
+ return ret;
+ if (++bar->arrived == bar->total)
+ ret |= BAR_WAS_LAST;
return ret;
}
+static inline void
+gomp_team_barrier_wait_final (gomp_barrier_t *bar)
+{
+ gomp_team_barrier_wait (bar);
+}
+
static inline bool
gomp_barrier_last_thread (gomp_barrier_state_t state)
{
- return state & 1;
+ return state & BAR_WAS_LAST;
}
static inline void
@@ -85,31 +121,37 @@ gomp_barrier_wait_last (gomp_barrier_t *bar)
static inline void
gomp_team_barrier_set_task_pending (gomp_barrier_t *bar)
{
- bar->generation |= 1;
+ bar->generation |= BAR_TASK_PENDING;
}
static inline void
gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar)
{
- bar->generation &= ~1;
+ bar->generation &= ~BAR_TASK_PENDING;
}
static inline void
gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar)
{
- bar->generation |= 2;
+ bar->generation |= BAR_WAITING_FOR_TASK;
}
static inline bool
gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
{
- return (bar->generation & 2) != 0;
+ return (bar->generation & BAR_WAITING_FOR_TASK) != 0;
+}
+
+static inline bool
+gomp_team_barrier_cancelled (gomp_barrier_t *bar)
+{
+ return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
}
static inline void
gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
- bar->generation = (state & ~3) + 4;
+ bar->generation = (state & -BAR_INCR) + BAR_INCR;
}
#endif /* GOMP_BARRIER_H */