author     Jakub Jelinek <jakub@redhat.com>       2013-10-11 11:26:50 +0200
committer  Jakub Jelinek <jakub@gcc.gnu.org>      2013-10-11 11:26:50 +0200
commit     acf0174b6f7a3f8fe1e2a27361bbf87dfe454530 (patch)
tree       2a3d60fbf15f9346c02647762dcc441fe3841855 /libgomp/testsuite/libgomp.c
parent     f7191ecdbd3adad32b561db40fac6978df6409fe (diff)
target.c: New file.
libgomp/
* target.c: New file.
* Makefile.am (libgomp_la_SOURCES): Add target.c.
* Makefile.in: Regenerated.
* libgomp_g.h (GOMP_task): Add depend argument.
(GOMP_barrier_cancel, GOMP_loop_end_cancel,
GOMP_sections_end_cancel, GOMP_target, GOMP_target_data,
GOMP_target_end_data, GOMP_target_update, GOMP_teams,
GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic,
GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime,
GOMP_parallel, GOMP_cancel, GOMP_cancellation_point,
GOMP_taskgroup_start, GOMP_taskgroup_end,
GOMP_parallel_sections): New prototypes.
* fortran.c (omp_is_initial_device): Add ialias_redirect.
(omp_is_initial_device_): New function.
(ULP, STR1, STR2, ialias_redirect): Removed.
(omp_get_cancellation_, omp_get_proc_bind_, omp_set_default_device_,
omp_set_default_device_8_, omp_get_default_device_,
omp_get_num_devices_, omp_get_num_teams_, omp_get_team_num_): New
functions.
* libgomp.map (GOMP_barrier_cancel, GOMP_loop_end_cancel,
GOMP_sections_end_cancel, GOMP_target, GOMP_target_data,
GOMP_target_end_data, GOMP_target_update, GOMP_teams): Export
@@GOMP_4.0.
(omp_is_initial_device, omp_is_initial_device_, omp_get_cancellation,
omp_get_cancellation_, omp_get_proc_bind, omp_get_proc_bind_,
omp_set_default_device, omp_set_default_device_,
omp_set_default_device_8_, omp_get_default_device,
omp_get_default_device_, omp_get_num_devices, omp_get_num_devices_,
omp_get_num_teams, omp_get_num_teams_, omp_get_team_num,
omp_get_team_num_): Export @@OMP_4.0.
* team.c (struct gomp_thread_start_data): Add place field.
(gomp_thread_start): Clear thr->thread_pool and
thr->task before returning. Use gomp_team_barrier_wait_final
instead of gomp_team_barrier_wait. Initialize thr->place.
(gomp_new_team): Initialize work_shares_to_free, work_share_cancelled,
team_cancelled and task_queued_count fields.
(gomp_free_pool_helper): Clear thr->thread_pool and thr->task
before calling pthread_exit.
(gomp_free_thread): No longer static. Use
gomp_managed_threads_lock instead of gomp_remaining_threads_lock.
(gomp_team_start): Add flags argument. Set
thr->thread_pool->threads_busy to nthreads immediately after creating
new pool. Use gomp_managed_threads_lock instead of
gomp_remaining_threads_lock. Handle OpenMP 4.0 affinity.
(gomp_team_end): Use gomp_managed_threads_lock instead of
gomp_remaining_threads_lock. Use gomp_team_barrier_wait_final instead
of gomp_team_barrier_wait. If team->team_cancelled, call
gomp_fini_work_share on the ws chain starting at team->work_shares_to_free
rather than thr->ts.work_share.
(initialize_team): Don't call gomp_sem_init here.
* sections.c (GOMP_parallel_sections_start): Adjust gomp_team_start
caller.
(GOMP_parallel_sections, GOMP_sections_end_cancel): New functions.
* env.c (gomp_global_icv): Add default_device_var, target_data and
bind_var initializers.
(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list,
gomp_places_list_len): New variables.
(parse_bind_var, parse_one_place, parse_places_var): New functions.
(parse_affinity): Rewritten to construct OMP_PLACES list with unit
sized places.
(gomp_cancel_var): New global variable.
(parse_int): New function.
(handle_omp_display_env): New function.
(initialize_env): Use it. Initialize default_device_var.
Parse OMP_CANCELLATION env var. Use parse_bind_var to parse
OMP_PROC_BIND instead of parse_boolean. Use parse_places_var for
OMP_PLACES parsing. Don't call parse_affinity if OMP_PLACES has
been successfully parsed (and call gomp_init_affinity in that case).
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New functions (a usage sketch
follows the libgomp/ entries below).
* libgomp.h: Include stdlib.h.
(ialias_ulp, ialias_str1, ialias_str2, ialias_redirect, ialias_call):
Define.
(struct target_mem_desc): Forward declare.
(struct gomp_task_icv): Add default_device_var, target_data, bind_var
and thread_limit_var fields.
(gomp_get_num_devices): New prototype.
(gomp_cancel_var): New extern decl.
(struct gomp_team): Add work_shares_to_free, work_share_cancelled,
team_cancelled and task_queued_count fields. Add comments about
task_{,queued_,running_}count.
(gomp_cancel_kind): New enum.
(gomp_work_share_end_cancel): New prototype.
(struct gomp_task): Add next_taskgroup, prev_taskgroup, taskgroup,
copy_ctors_done, dependers, depend_hash, depend_count, num_dependees
and depend fields.
(struct gomp_taskgroup): New type.
(struct gomp_task_depend_entry,
struct gomp_dependers_vec): New types.
(gomp_finish_task): Free depend_hash if non-NULL.
(struct gomp_team_state): Add place_partition_off
and place_partition_len fields.
(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list,
gomp_places_list_len): New extern decls.
(struct gomp_thread): Add place field.
(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
(gomp_init_thread_affinity): Add place argument.
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New
prototypes.
(gomp_team_start): Add flags argument.
(gomp_thread_limit_var, gomp_remaining_threads_count,
gomp_remaining_threads_lock): Remove.
(gomp_managed_threads_lock): New variable.
(struct gomp_thread_pool): Add threads_busy field.
(gomp_free_thread): New prototype.
* task.c: Include hashtab.h.
(hash_entry_type): New typedef.
(htab_alloc, htab_free, htab_hash, htab_eq): New inlines.
(gomp_init_task): Clear dependers, depend_hash, depend_count,
copy_ctors_done and taskgroup fields.
(GOMP_task): Add depend argument, handle depend clauses. If
gomp_team_barrier_cancelled or if its taskgroup has been
cancelled, don't queue or start new tasks. Set copy_ctors_done
field if needed. Initialize taskgroup field. If copy_ctors_done
and already cancelled, don't discard the task. If taskgroup is
non-NULL, enqueue the task into taskgroup queue. Increment
num_children field in taskgroup. Increment task_queued_count.
(gomp_task_run_pre, gomp_task_run_post_remove_parent,
gomp_task_run_post_remove_taskgroup): New inline functions.
(gomp_task_run_post_handle_depend_hash,
gomp_task_run_post_handle_dependers,
gomp_task_run_post_handle_depend): New functions.
(GOMP_taskwait): Use them. If more than one new task has
been queued, wake other threads if needed.
(gomp_barrier_handle_tasks): Likewise. If
gomp_team_barrier_cancelled, don't start any new tasks, just free
all tasks.
(GOMP_taskgroup_start, GOMP_taskgroup_end): New functions.
* omp_lib.f90.in
(omp_proc_bind_kind, omp_proc_bind_false,
omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close,
omp_proc_bind_spread): New params.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New interfaces.
(omp_get_dynamic, omp_get_nested, omp_in_parallel,
omp_get_max_threads, omp_get_num_procs, omp_get_num_threads,
omp_get_thread_num, omp_get_thread_limit, omp_set_max_active_levels,
omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num,
omp_get_team_size, omp_get_active_level, omp_in_final): Remove
useless use omp_lib_kinds.
* omp.h.in (omp_proc_bind_t): New typedef.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New prototypes.
* loop.c (gomp_parallel_loop_start): Add flags argument, pass it
through to gomp_team_start.
(GOMP_parallel_loop_static_start, GOMP_parallel_loop_dynamic_start,
GOMP_parallel_loop_guided_start, GOMP_parallel_loop_runtime_start):
Adjust gomp_parallel_loop_start callers.
(GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic,
GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime,
GOMP_loop_end_cancel): New functions.
(GOMP_parallel_end): Add ialias_redirect.
* hashtab.h: New file.
* libgomp.texi (Environment Variables): Minor cleanup,
update section refs to OpenMP 4.0rc2.
(OMP_DISPLAY_ENV, GOMP_SPINCOUNT): Document these
environment variables.
* work.c (gomp_work_share_end, gomp_work_share_end_nowait): Set
team->work_shares_to_free to thr->ts.work_share before calling
free_work_share.
(gomp_work_share_end_cancel): New function.
* config/linux/proc.c: Include errno.h.
(gomp_get_cpuset_size, gomp_cpuset_size, gomp_cpusetp): New variables.
(gomp_cpuset_popcount): Add cpusetsize argument, use it instead of
sizeof (cpu_set_t) to determine number of iterations. Fix up check
extern decl. Use CPU_COUNT_S if available, or CPU_COUNT if
gomp_cpuset_size is sizeof (cpu_set_t).
(gomp_init_num_threads): Initialize gomp_cpuset_size,
gomp_get_cpuset_size and gomp_cpusetp here, use gomp_cpusetp instead
of &cpuset and pass gomp_cpuset_size instead of sizeof (cpu_set_t)
to pthread_getaffinity_np. Free and clear gomp_cpusetp if it didn't
contain any logical CPUs.
(get_num_procs): Don't call pthread_getaffinity_np if gomp_cpusetp
is NULL. Use gomp_cpusetp instead of &cpuset and pass
gomp_get_cpuset_size instead of sizeof (cpu_set_t) to
pthread_getaffinity_np. Check gomp_places_list instead of
gomp_cpu_affinity. Adjust gomp_cpuset_popcount caller.
* config/linux/bar.c (gomp_barrier_wait_end,
gomp_barrier_wait_last): Use BAR_* defines.
(gomp_team_barrier_wait_end): Likewise. Clear BAR_CANCELLED
from state where needed. Set work_share_cancelled to 0 on last
thread.
(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel_end,
gomp_team_barrier_wait_cancel, gomp_team_barrier_cancel): New
functions.
* config/linux/proc.h (gomp_cpuset_popcount): Add attribute_hidden.
Add cpusetsize argument.
(gomp_cpuset_size, gomp_cpusetp): Declare.
* config/linux/affinity.c: Include errno.h, stdio.h and string.h.
(affinity_counter): Remove.
(CPU_ISSET_S, CPU_ZERO_S, CPU_SET_S, CPU_CLR_S): Define
if CPU_ALLOC_SIZE isn't defined.
(gomp_init_affinity): Rewritten; if gomp_places_list is NULL, try to
silently create OMP_PLACES=threads, and if it is non-NULL afterwards,
bind the current thread to the first place.
(gomp_init_thread_affinity): Rewritten. Add place argument; just call
pthread_setaffinity_np with gomp_places_list[place].
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New functions.
* config/linux/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST,
BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
(gomp_barrier_t): Add awaited_final field.
(gomp_barrier_init): Initialize awaited_final field.
(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel,
gomp_team_barrier_wait_cancel_end, gomp_team_barrier_cancel): New
prototypes.
(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit. Use BAR_*
defines.
(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final_start,
gomp_team_barrier_cancelled): New inline functions.
(gomp_barrier_last_thread,
gomp_team_barrier_set_task_pending,
gomp_team_barrier_clear_task_pending,
gomp_team_barrier_set_waiting_for_tasks,
gomp_team_barrier_waiting_for_tasks,
gomp_team_barrier_done): Use BAR_* defines.
* config/posix/bar.c (gomp_barrier_init): Clear cancellable field.
(gomp_barrier_wait_end): Use BAR_* defines.
(gomp_team_barrier_wait_end): Clear BAR_CANCELLED from state.
Set work_share_cancelled to 0 on last thread, use __atomic_load_n.
Use BAR_* defines.
(gomp_team_barrier_wait_cancel_end, gomp_team_barrier_wait_cancel,
gomp_team_barrier_cancel): New functions.
* config/posix/affinity.c (gomp_init_thread_affinity): Add place
argument.
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New stubs.
* config/posix/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST,
BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
(gomp_barrier_t): Add cancellable field.
(gomp_team_barrier_wait_cancel, gomp_team_barrier_wait_cancel_end,
gomp_team_barrier_cancel): New prototypes.
(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit.
(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final,
gomp_team_barrier_cancelled): New inline functions.
(gomp_barrier_wait_start, gomp_barrier_last_thread,
gomp_team_barrier_set_task_pending,
gomp_team_barrier_clear_task_pending,
gomp_team_barrier_set_waiting_for_tasks,
gomp_team_barrier_waiting_for_tasks,
gomp_team_barrier_done): Use BAR_* defines.
* barrier.c (GOMP_barrier_cancel): New function.
* omp_lib.h.in (omp_proc_bind_kind, omp_proc_bind_false,
omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close,
omp_proc_bind_spread): New params.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New externals.
* parallel.c (GOMP_parallel, GOMP_cancel, GOMP_cancellation_point):
New functions.
(gomp_resolve_num_threads): Adjust for thread_limit now being in
icv->thread_limit_var. Use UINT_MAX instead of ULONG_MAX as
infinity. If not nested, just return minimum of max_num_threads
and icv->thread_limit_var and if thr->thread_pool, set threads_busy
to the returned value. Otherwise, atomically update
thr->thread_pool->threads_busy instead of gomp_remaining_threads_count.
(GOMP_parallel_end): Adjust for thread_limit now being in
icv->thread_limit_var. Use UINT_MAX instead of ULONG_MAX as
infinity. Adjust threads_busy in the pool rather than
gomp_remaining_threads_count. Remember team->nthreads and call
gomp_team_end before adjusting threads_busy, if not nested
afterwards, just set it to 1 non-atomically. Add ialias.
(GOMP_parallel_start): Adjust gomp_team_start caller.
* testsuite/libgomp.c/atomic-14.c: Add parens to make it valid.
* testsuite/libgomp.c/affinity-1.c: New test.
* testsuite/libgomp.c/atomic-15.c: New test.
* testsuite/libgomp.c/atomic-16.c: New test.
* testsuite/libgomp.c/atomic-17.c: New test.
* testsuite/libgomp.c/cancel-for-1.c: New test.
* testsuite/libgomp.c/cancel-for-2.c: New test.
* testsuite/libgomp.c/cancel-parallel-1.c: New test.
* testsuite/libgomp.c/cancel-parallel-2.c: New test.
* testsuite/libgomp.c/cancel-parallel-3.c: New test.
* testsuite/libgomp.c/cancel-sections-1.c: New test.
* testsuite/libgomp.c/cancel-taskgroup-1.c: New test.
* testsuite/libgomp.c/cancel-taskgroup-2.c: New test.
* testsuite/libgomp.c/depend-1.c: New test.
* testsuite/libgomp.c/depend-2.c: New test.
* testsuite/libgomp.c/depend-3.c: New test.
* testsuite/libgomp.c/depend-4.c: New test.
* testsuite/libgomp.c/for-1.c: New test.
* testsuite/libgomp.c/for-1.h: New file.
* testsuite/libgomp.c/for-2.c: New test.
* testsuite/libgomp.c/for-2.h: New file.
* testsuite/libgomp.c/for-3.c: New test.
* testsuite/libgomp.c/pr58392.c: New test.
* testsuite/libgomp.c/simd-1.c: New test.
* testsuite/libgomp.c/simd-2.c: New test.
* testsuite/libgomp.c/simd-3.c: New test.
* testsuite/libgomp.c/simd-4.c: New test.
* testsuite/libgomp.c/simd-5.c: New test.
* testsuite/libgomp.c/simd-6.c: New test.
* testsuite/libgomp.c/target-1.c: New test.
* testsuite/libgomp.c/target-2.c: New test.
* testsuite/libgomp.c/target-3.c: New test.
* testsuite/libgomp.c/target-4.c: New test.
* testsuite/libgomp.c/target-5.c: New test.
* testsuite/libgomp.c/target-6.c: New test.
* testsuite/libgomp.c/target-7.c: New test.
* testsuite/libgomp.c/taskgroup-1.c: New test.
* testsuite/libgomp.c/thread-limit-1.c: New test.
* testsuite/libgomp.c/thread-limit-2.c: New test.
* testsuite/libgomp.c/thread-limit-3.c: New test.
* testsuite/libgomp.c/udr-1.c: New test.
* testsuite/libgomp.c/udr-2.c: New test.
* testsuite/libgomp.c/udr-3.c: New test.
* testsuite/libgomp.c++/affinity-1.C: New test.
* testsuite/libgomp.c++/atomic-10.C: New test.
* testsuite/libgomp.c++/atomic-11.C: New test.
* testsuite/libgomp.c++/atomic-12.C: New test.
* testsuite/libgomp.c++/atomic-13.C: New test.
* testsuite/libgomp.c++/atomic-14.C: New test.
* testsuite/libgomp.c++/atomic-15.C: New test.
* testsuite/libgomp.c++/cancel-for-1.C: New test.
* testsuite/libgomp.c++/cancel-for-2.C: New test.
* testsuite/libgomp.c++/cancel-parallel-1.C: New test.
* testsuite/libgomp.c++/cancel-parallel-2.C: New test.
* testsuite/libgomp.c++/cancel-parallel-3.C: New test.
* testsuite/libgomp.c++/cancel-sections-1.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-1.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-2.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-3.C: New test.
* testsuite/libgomp.c++/cancel-test.h: New file.
* testsuite/libgomp.c++/for-9.C: New test.
* testsuite/libgomp.c++/for-10.C: New test.
* testsuite/libgomp.c++/for-11.C: New test.
* testsuite/libgomp.c++/simd-1.C: New test.
* testsuite/libgomp.c++/simd-2.C: New test.
* testsuite/libgomp.c++/simd-3.C: New test.
* testsuite/libgomp.c++/simd-4.C: New test.
* testsuite/libgomp.c++/simd-5.C: New test.
* testsuite/libgomp.c++/simd-6.C: New test.
* testsuite/libgomp.c++/simd-7.C: New test.
* testsuite/libgomp.c++/simd-8.C: New test.
* testsuite/libgomp.c++/target-1.C: New test.
* testsuite/libgomp.c++/target-2.C: New test.
* testsuite/libgomp.c++/target-2-aux.cc: New file.
* testsuite/libgomp.c++/target-3.C: New test.
* testsuite/libgomp.c++/taskgroup-1.C: New test.
* testsuite/libgomp.c++/udr-1.C: New test.
* testsuite/libgomp.c++/udr-2.C: New test.
* testsuite/libgomp.c++/udr-3.C: New test.
* testsuite/libgomp.c++/udr-4.C: New test.
* testsuite/libgomp.c++/udr-5.C: New test.
* testsuite/libgomp.c++/udr-6.C: New test.
* testsuite/libgomp.c++/udr-7.C: New test.
* testsuite/libgomp.c++/udr-8.C: New test.
* testsuite/libgomp.c++/udr-9.C: New test.
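For reference, here is a minimal usage sketch of the OpenMP 4.0 runtime
additions listed above; it is not part of the commit and the driver below is
hypothetical. It calls the newly exported omp_get_proc_bind,
omp_get_cancellation, omp_get_num_devices, omp_get_default_device and
omp_is_initial_device entry points, whose behaviour is steered by the
OMP_PROC_BIND, OMP_PLACES, OMP_CANCELLATION and OMP_DISPLAY_ENV environment
variables handled in env.c.

/* Illustrative sketch, not part of the commit.
   Run e.g. with:
   OMP_DISPLAY_ENV=true OMP_PROC_BIND=spread,close OMP_PLACES="{0}:8" \
   OMP_CANCELLATION=true ./a.out  */
#include <omp.h>
#include <stdio.h>

int
main (void)
{
  omp_proc_bind_t bind = omp_get_proc_bind ();
  printf ("proc_bind: %d (false=%d true=%d master=%d close=%d spread=%d)\n",
          (int) bind, omp_proc_bind_false, omp_proc_bind_true,
          omp_proc_bind_master, omp_proc_bind_close, omp_proc_bind_spread);
  printf ("cancellation enabled: %d\n", omp_get_cancellation ());
  printf ("devices: %d, default device: %d, on initial device: %d\n",
          omp_get_num_devices (), omp_get_default_device (),
          omp_is_initial_device ());
  return 0;
}

With OMP_DISPLAY_ENV=true the new handle_omp_display_env code prints the ICV
settings at startup, which makes it easy to check how OMP_PLACES and
OMP_PROC_BIND were parsed.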
gcc/
* tree-pretty-print.c (dump_omp_clause): Handle OMP_CLAUSE__LOOPTEMP_
and new OpenMP 4.0 clauses, handle UDR OMP_CLAUSE_REDUCTION,
formatting fixes, use pp_colon instead of pp_character (..., ':'),
similarly pp_right_paren.
(dump_generic_node): Handle OMP_DISTRIBUTE, OMP_TEAMS,
OMP_TARGET_DATA, OMP_TARGET, OMP_TARGET_UPDATE, OMP_TASKGROUP,
allow OMP_FOR_INIT to be NULL, handle OMP_ATOMIC_SEQ_CST.
* tree.c (omp_clause_num_ops, omp_clause_code_name): Add OpenMP 4.0
clauses.
(omp_declare_simd_clauses_equal,
omp_remove_redundant_declare_simd_attrs): New functions.
(attribute_value_equal): Use omp_declare_simd_clauses_equal.
(walk_tree_1): Handle new OpenMP 4.0 clauses.
* tree.h (OMP_LOOP_CHECK): Define.
(OMP_FOR_BODY, OMP_FOR_CLAUSES, OMP_FOR_INIT, OMP_FOR_COND,
OMP_FOR_INCR, OMP_FOR_PRE_BODY): Use it.
(OMP_TASKGROUP_BODY, OMP_TEAMS_BODY, OMP_TEAMS_CLAUSES,
OMP_TARGET_DATA_BODY, OMP_TARGET_DATA_CLAUSES, OMP_TARGET_BODY,
OMP_TARGET_CLAUSES, OMP_TARGET_UPDATE_CLAUSES, OMP_CLAUSE_SIZE,
OMP_ATOMIC_SEQ_CST, OMP_CLAUSE_DEPEND_KIND, OMP_CLAUSE_MAP_KIND,
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION, OMP_CLAUSE_PROC_BIND_KIND,
OMP_CLAUSE_REDUCTION_OMP_ORIG_REF, OMP_CLAUSE_ALIGNED_ALIGNMENT,
OMP_CLAUSE_NUM_TEAMS_EXPR, OMP_CLAUSE_THREAD_LIMIT_EXPR,
OMP_CLAUSE_DEVICE_ID, OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR,
OMP_CLAUSE_SIMDLEN_EXPR): Define.
(OMP_CLAUSE_DECL): Change range up to OMP_CLAUSE__LOOPTEMP_.
(omp_remove_redundant_declare_simd_attrs): New prototype.
* gimple.def (GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS): New codes.
(GIMPLE_OMP_RETURN): Use GSS_OMP_ATOMIC_STORE instead of GSS_BASE.
* omp-low.c (struct omp_context): Add cancel_label and cancellable
fields.
(target_nesting_level): New variable.
(extract_omp_for_data): Handle GF_OMP_FOR_KIND_DISTRIBUTE and
OMP_CLAUSE_DIST_SCHEDULE. Don't fall back to the library implementation
for collapse > 1 static schedule unless ordered.
(get_ws_args_for): Add par_stmt argument. Handle combined loops.
(determine_parallel_type): Adjust get_ws_args_for caller.
(install_var_field): Handle mask & 4 for double indirection.
(scan_sharing_clauses): Ignore shared clause on teams construct.
Handle OMP_CLAUSE__LOOPTEMP_ and new OpenMP 4.0 clauses.
(create_omp_child_function): If inside target or declare target
constructs, set "omp declare target" attribute on the child
function.
(find_combined_for): New function.
(scan_omp_parallel): Handle combined loops.
(scan_omp_target, scan_omp_teams): New functions.
(check_omp_nesting_restrictions): Check new OpenMP 4.0 nesting
restrictions and set ctx->cancellable for cancellable constructs.
(scan_omp_1_stmt): Call check_omp_nesting_restrictions also on
selected builtin calls. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS.
(build_omp_barrier): Add lhs argument, return gimple rather than
tree.
(omp_clause_aligned_alignment): New function.
(lower_rec_simd_input_clauses): Only call SET_DECL_VALUE_EXPR
on decls.
(lower_rec_input_clauses): Add FD argument. Ignore shared clauses
on teams constructs. Handle user defined reductions and new
OpenMP 4.0 clauses.
(lower_reduction_clauses): Don't set placeholder to address of ref
if it already has the right type.
(lower_send_clauses): Handle OMP_CLAUSE__LOOPTEMP_.
(expand_parallel_call): Use the new non-_start suffixed builtins,
handle OMP_CLAUSE_PROC_BIND, don't call the outlined function
and GOMP_parallel_end after the call.
(expand_task_call): Handle OMP_CLAUSE_DEPEND.
(expand_omp_for_init_counts): Handle combined loops.
(expand_omp_for_init_vars): Add inner_stmt argument, handle combined
loops.
(expand_omp_for_generic): Likewise. Use GOMP_loop_end_cancel at the
end of cancellable loops.
(expand_omp_for_static_nochunk, expand_omp_for_static_chunk):
Likewise. Handle collapse > 1 loops.
(expand_omp_simd): Handle combined loops.
(expand_omp_for): Add inner_stmt argument, adjust callers of
expand_omp_for* functions, use expand_omp_for_static*chunk even
for collapse > 1 unless ordered.
(expand_omp_sections): Use GOMP_sections_end_cancel at the end
of cancellable sections.
(expand_omp_single): Remove need_barrier variable, just rely on
gimple_omp_return_nowait_p. Adjust build_omp_barrier caller.
(expand_omp_synch): Allow GIMPLE_OMP_TASKGROUP and GIMPLE_OMP_TEAMS.
(expand_omp_atomic_load, expand_omp_atomic_store,
expand_omp_atomic_fetch_op): Handle gimple_omp_atomic_seq_cst_p.
(expand_omp_target): New function.
(expand_omp): Handle combined loops. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TEAMS, GIMPLE_OMP_TARGET.
(build_omp_regions_1): Immediately close region for
GF_OMP_TARGET_KIND_UPDATE.
(maybe_add_implicit_barrier_cancel): New function.
(lower_omp_sections): Adjust lower_rec_input_clauses caller. Handle
cancellation.
(lower_omp_single): Likewise. Add clobber after the barrier.
(lower_omp_taskgroup): New function.
(lower_omp_for): Handle combined loops. Adjust
lower_rec_input_clauses caller. Handle cancellation.
(lower_depend_clauses): New function.
(lower_omp_taskreg): Lower depend clauses. Adjust
lower_rec_input_clauses caller. Add clobber after the call. Handle
cancellation.
(lower_omp_target, lower_omp_teams): New functions.
(lower_omp_1): Handle cancellation. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GOMP_barrier, GOMP_cancel
and GOMP_cancellation_point calls.
(lower_omp): Fold stmts inside of target region.
(diagnose_sb_1, diagnose_sb_2): Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
* builtin-types.def (DEF_FUNCTION_TYPE_8): Document.
(BT_FN_VOID_OMPFN_PTR_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT,
BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
* tree-ssa-alias.c (ref_maybe_used_by_call_p_1,
call_may_clobber_ref_p_1): Handle BUILT_IN_GOMP_BARRIER_CANCEL,
BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_LOOP_END_CANCEL,
BUILT_IN_GOMP_SECTIONS_END_CANCEL. Don't handle
BUILT_IN_GOMP_PARALLEL_END.
* gimple-low.c (lower_stmt): Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
* gimple-pretty-print.c (dump_gimple_omp_for): Handle
GF_OMP_FOR_KIND_DISTRIBUTE.
(dump_gimple_omp_target, dump_gimple_omp_teams): New functions.
(dump_gimple_omp_block): Handle GIMPLE_OMP_TASKGROUP.
(dump_gimple_omp_return): Print lhs if it has any.
(dump_gimple_omp_atomic_load, dump_gimple_omp_atomic_store): Handle
gimple_omp_atomic_seq_cst_p.
(pp_gimple_stmt_1): Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET
and GIMPLE_OMP_TEAMS.
* langhooks.c (lhd_omp_mappable_type): New function.
* tree-vectorizer.c (struct simd_array_to_simduid): Fix up comment.
* langhooks.h (struct lang_hooks_for_types): Add omp_mappable_type
hook.
* gimplify.c (enum gimplify_omp_var_data): Add GOVD_MAP,
GOVD_ALIGNED and GOVD_MAP_TO_ONLY.
(enum omp_region_type): Add ORT_TEAMS, ORT_TARGET_DATA and
ORT_TARGET.
(struct gimplify_omp_ctx): Add combined_loop field.
(gimplify_call_expr, gimplify_modify_expr): Don't call fold_stmt
on stmts inside of target region.
(is_gimple_stmt): Return true for OMP_DISTRIBUTE and OMP_TASKGROUP.
(omp_firstprivatize_variable): Handle GOVD_MAP, GOVD_ALIGNED,
ORT_TARGET and ORT_TARGET_DATA.
(omp_add_variable): Avoid checks on readding var for GOVD_ALIGNED.
Handle GOVD_MAP.
(omp_notice_threadprivate_variable): Complain about threadprivate
variables in target region.
(omp_notice_variable): Complain about vars with non-mappable type
in target region. Handle ORT_TEAMS, ORT_TARGET and ORT_TARGET_DATA.
(omp_check_private): Ignore ORT_TARGET* regions.
(gimplify_scan_omp_clauses, gimplify_adjust_omp_clauses_1,
gimplify_adjust_omp_clauses): Handle new OpenMP 4.0 clauses.
(find_combined_omp_for): New function.
(gimplify_omp_for): Handle gimplification of combined loops.
(gimplify_omp_workshare): Gimplify also OMP_TARGET, OMP_TARGET_DATA,
OMP_TEAMS.
(gimplify_omp_target_update): New function.
(gimplify_omp_atomic): Handle OMP_ATOMIC_SEQ_CST.
(gimplify_expr): Handle OMP_DISTRIBUTE, OMP_TARGET, OMP_TARGET_DATA,
OMP_TARGET_UPDATE, OMP_TEAMS, OMP_TASKGROUP.
(gimplify_body): If fndecl has "omp declare target" attribute, add
implicit ORT_TARGET context around it.
* tree.def (OMP_DISTRIBUTE, OMP_TEAMS, OMP_TARGET_DATA, OMP_TARGET,
OMP_TASKGROUP, OMP_TARGET_UPDATE): New tree codes (an example using these
constructs follows the gcc/ entries).
* tree-nested.c (convert_nonlocal_reference_stmt,
convert_local_reference_stmt, convert_gimple_call): Handle
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* omp-builtins.def (BUILT_IN_GOMP_TASK): Use
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR
instead of BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT.
(BUILT_IN_GOMP_TARGET, BUILT_IN_GOMP_TARGET_DATA,
BUILT_IN_GOMP_TARGET_END_DATA, BUILT_IN_GOMP_TARGET_UPDATE,
BUILT_IN_GOMP_TEAMS, BUILT_IN_GOMP_BARRIER_CANCEL,
BUILT_IN_GOMP_LOOP_END_CANCEL,
BUILT_IN_GOMP_SECTIONS_END_CANCEL, BUILT_IN_OMP_GET_TEAM_NUM,
BUILT_IN_OMP_GET_NUM_TEAMS, BUILT_IN_GOMP_TASKGROUP_START,
BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_PARALLEL_LOOP_STATIC,
BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC,
BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED,
BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME, BUILT_IN_GOMP_PARALLEL,
BUILT_IN_GOMP_PARALLEL_SECTIONS, BUILT_IN_GOMP_CANCEL,
BUILT_IN_GOMP_CANCELLATION_POINT): New built-ins.
(BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START,
BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC_START,
BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED_START,
BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME_START,
BUILT_IN_GOMP_PARALLEL_START, BUILT_IN_GOMP_PARALLEL_END,
BUILT_IN_GOMP_PARALLEL_SECTIONS_START): Remove.
* tree-inline.c (remap_gimple_stmt, estimate_num_insns):
Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* gimple.c (gimple_build_omp_taskgroup, gimple_build_omp_target,
gimple_build_omp_teams): New functions.
(walk_gimple_op): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and
GIMPLE_OMP_TASKGROUP. Walk optional lhs on GIMPLE_OMP_RETURN.
(walk_gimple_stmt, gimple_copy): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* gimple.h (enum gf_mask): GF_OMP_FOR_KIND_DISTRIBUTE,
GF_OMP_FOR_COMBINED, GF_OMP_FOR_COMBINED_INTO,
GF_OMP_TARGET_KIND_MASK, GF_OMP_TARGET_KIND_REGION,
GF_OMP_TARGET_KIND_DATA, GF_OMP_TARGET_KIND_UPDATE,
GF_OMP_ATOMIC_SEQ_CST): New.
(gimple_build_omp_taskgroup, gimple_build_omp_target,
gimple_build_omp_teams): New prototypes.
(gimple_has_substatements): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
(gimple_omp_subcode): Use GIMPLE_OMP_TEAMS instead of
GIMPLE_OMP_SINGLE as end of range.
(gimple_omp_return_set_lhs, gimple_omp_return_lhs,
gimple_omp_return_lhs_ptr, gimple_omp_atomic_seq_cst_p,
gimple_omp_atomic_set_seq_cst, gimple_omp_for_combined_p,
gimple_omp_for_set_combined_p, gimple_omp_for_combined_into_p,
gimple_omp_for_set_combined_into_p, gimple_omp_target_clauses,
gimple_omp_target_clauses_ptr, gimple_omp_target_set_clauses,
gimple_omp_target_kind, gimple_omp_target_set_kind,
gimple_omp_target_child_fn, gimple_omp_target_child_fn_ptr,
gimple_omp_target_set_child_fn, gimple_omp_target_data_arg,
gimple_omp_target_data_arg_ptr, gimple_omp_target_set_data_arg,
gimple_omp_teams_clauses, gimple_omp_teams_clauses_ptr,
gimple_omp_teams_set_clauses): New inlines.
(CASE_GIMPLE_OMP): Add GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS
and GIMPLE_OMP_TASKGROUP.
* tree-core.h (enum omp_clause_code): Add new OpenMP 4.0 clause
codes.
(enum omp_clause_depend_kind, enum omp_clause_map_kind,
enum omp_clause_proc_bind_kind): New.
(union omp_clause_subcode): Add depend_kind, map_kind and
proc_bind_kind fields.
* tree-cfg.c (make_edges): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* langhooks-def.h (lhd_omp_mappable_type): New prototype.
(LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
(LANG_HOOKS_FOR_TYPES_INITIALIZER): Add it.
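As an illustration of the constructs the gcc/ changes above gimplify and
expand, the following sketch (not taken from the commit; the array and
variable names are invented) uses the new combined
target/teams/distribute/parallel for/simd construct and task dependences.

/* Illustrative sketch, not part of the commit.  */
#define N 1024

int
main (void)
{
  float a[N], b[N], c[N];
  int i, x = 0, y = 0;

  for (i = 0; i < N; i++)
    {
      a[i] = i;
      b[i] = 2 * i;
    }

  /* Combined construct, lowered through the new GIMPLE_OMP_TARGET,
     GIMPLE_OMP_TEAMS and GF_OMP_FOR_KIND_DISTRIBUTE codes.  */
  #pragma omp target teams distribute parallel for simd \
          map(to: a, b) map(from: c)
  for (i = 0; i < N; i++)
    c[i] = a[i] + b[i];

  /* Task dependences: the depend clauses end up in the new last
     argument of GOMP_task.  */
  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task depend(out: x)
    x = 1;
    #pragma omp task depend(in: x) depend(out: y)
    y = x + 1;
  }

  return (c[2] == 6.0f && y == 2) ? 0 : 1;
}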
gcc/c-family/
* c-cppbuiltin.c (c_cpp_builtins): Predefine _OPENMP to
201307 instead of 201107.
* c-common.c (DEF_FUNCTION_TYPE_8): Define.
(c_common_attribute_table): Add "omp declare target" and
"omp declare simd" attributes.
(handle_omp_declare_target_attribute,
handle_omp_declare_simd_attribute): New functions.
* c-omp.c: Include c-pragma.h.
(c_finish_omp_taskgroup): New function.
(c_finish_omp_atomic): Add swapped argument; if true,
build the operation with the rhs and lhs operands swapped and use
NOP_EXPR in the build_modify_expr call (see the atomic sketch after
the c-family/ entries).
(c_finish_omp_for): Add code argument, pass it down to make_node.
(c_omp_split_clauses): New function.
(c_split_parallel_clauses): Removed.
(c_omp_declare_simd_clause_cmp, c_omp_declare_simd_clauses_to_numbers,
c_omp_declare_simd_clauses_to_decls): New functions.
* c-common.h (omp_clause_mask): New type.
(OMP_CLAUSE_MASK_1): Define.
(omp_clause_mask::omp_clause_mask, omp_clause_mask::operator &=,
omp_clause_mask::operator |=, omp_clause_mask::operator ~,
omp_clause_mask::operator |, omp_clause_mask::operator &,
omp_clause_mask::operator <<, omp_clause_mask::operator >>,
omp_clause_mask::operator ==): New methods.
(enum c_omp_clause_split): New.
(c_finish_omp_taskgroup): New prototype.
(c_finish_omp_atomic): Add swapped argument.
(c_finish_omp_for): Add code argument.
(c_omp_split_clauses): New prototype.
(c_split_parallel_clauses): Removed.
(c_omp_declare_simd_clauses_to_numbers,
c_omp_declare_simd_clauses_to_decls): New prototypes.
* c-pragma.c (omp_pragmas): Add new OpenMP 4.0 constructs.
* c-pragma.h (enum pragma_kind): Add PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION,
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_SIMD,
PRAGMA_OMP_TARGET, PRAGMA_OMP_TASKGROUP and PRAGMA_OMP_TEAMS.
Remove PRAGMA_OMP_PARALLEL_FOR and PRAGMA_OMP_PARALLEL_SECTIONS.
(enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_ALIGNED,
PRAGMA_OMP_CLAUSE_DEPEND, PRAGMA_OMP_CLAUSE_DEVICE,
PRAGMA_OMP_CLAUSE_DIST_SCHEDULE, PRAGMA_OMP_CLAUSE_FOR,
PRAGMA_OMP_CLAUSE_FROM, PRAGMA_OMP_CLAUSE_INBRANCH,
PRAGMA_OMP_CLAUSE_LINEAR, PRAGMA_OMP_CLAUSE_MAP,
PRAGMA_OMP_CLAUSE_NOTINBRANCH, PRAGMA_OMP_CLAUSE_NUM_TEAMS,
PRAGMA_OMP_CLAUSE_PARALLEL, PRAGMA_OMP_CLAUSE_PROC_BIND,
PRAGMA_OMP_CLAUSE_SAFELEN, PRAGMA_OMP_CLAUSE_SECTIONS,
PRAGMA_OMP_CLAUSE_SIMDLEN, PRAGMA_OMP_CLAUSE_TASKGROUP,
PRAGMA_OMP_CLAUSE_THREAD_LIMIT, PRAGMA_OMP_CLAUSE_TO and
PRAGMA_OMP_CLAUSE_UNIFORM.
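A small sketch of the OpenMP 4.0 atomic forms that the atomic handling above
(including the new swapped argument of c_finish_omp_atomic) accepts;
illustrative only, the function name is invented.

/* Illustrative sketch, not part of the commit.  */
int
atomic_forms (int *p, int v)
{
  int r;

  /* seq_cst clause: the update is marked OMP_ATOMIC_SEQ_CST and expanded
     with sequentially consistent memory order.  */
  #pragma omp atomic seq_cst
  *p += v;

  /* New in 4.0: the variable may be the second operand of the binop,
     which is what the "swapped" handling covers.  */
  #pragma omp atomic
  *p = v - *p;

  /* Atomic capture of the previous value.  */
  #pragma omp atomic capture
  { r = *p; *p += v; }

  return r;
}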
gcc/ada/
* gcc-interface/utils.c (DEF_FUNCTION_TYPE_8): Define.
gcc/fortran/
* trans-openmp.c (gfc_omp_clause_default_ctor,
gfc_omp_clause_dtor): Return NULL for OMP_CLAUSE_REDUCTION.
* f95-lang.c (ATTR_NULL, DEF_FUNCTION_TYPE_8): Define.
* types.def (DEF_FUNCTION_TYPE_8): Document.
(BT_FN_VOID_OMPFN_PTR_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT,
BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
gcc/lto/
* lto-lang.c (DEF_FUNCTION_TYPE_8): Define.
gcc/c/
* c-lang.h (current_omp_declare_target_attribute): New extern
decl.
* c-parser.c: Include c-lang.h.
(struct c_parser): Change tokens to c_token *.
Add tokens_buf field. Change tokens_avail type to unsigned int.
(c_parser_consume_token): If parser->tokens isn't
&parser->tokens_buf[0], increment parser->tokens.
(c_parser_consume_pragma): Likewise.
(enum pragma_context): Add pragma_struct and pragma_param.
(c_parser_external_declaration): Adjust
c_parser_declaration_or_fndef caller.
(c_parser_declaration_or_fndef): Add omp_declare_simd_clauses
argument; if it is a non-vNULL vector, call c_finish_omp_declare_simd.
Adjust recursive call.
(c_parser_struct_or_union_specifier): Use pragma_struct instead
of pragma_external.
(c_parser_parameter_declaration): Use pragma_param instead of
pragma_external.
(c_parser_compound_statement_nostart, c_parser_label,
c_parser_for_statement): Adjust
c_parser_declaration_or_fndef callers.
(c_parser_expr_no_commas): Add omp_atomic_lhs argument, pass
it through to c_parser_conditional_expression.
(c_parser_conditional_expression): Add omp_atomic_lhs argument,
pass it through to c_parser_binary_expression. Adjust recursive
call.
(c_parser_binary_expression): Remove prec argument, add
omp_atomic_lhs argument instead. Always start from PREC_NONE, if
omp_atomic_lhs is non-NULL and one of the arguments of toplevel
binop matches it, use build2 instead of parser_build_binary_op.
(c_parser_pragma): Handle PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_TARGET,
PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_DECLARE_REDUCTION.
Handle pragma_struct and pragma_param the same as pragma_external.
(c_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
(c_parser_omp_variable_list): Parse array sections for
OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses.
(c_parser_omp_clause_collapse): Fully fold collapse expression.
(c_parser_omp_clause_reduction): Handle user defined reductions.
(c_parser_omp_clause_branch, c_parser_omp_clause_cancelkind,
c_parser_omp_clause_num_teams, c_parser_omp_clause_thread_limit,
c_parser_omp_clause_aligned, c_parser_omp_clause_linear,
c_parser_omp_clause_safelen, c_parser_omp_clause_simdlen,
c_parser_omp_clause_depend, c_parser_omp_clause_map,
c_parser_omp_clause_device, c_parser_omp_clause_dist_schedule,
c_parser_omp_clause_proc_bind, c_parser_omp_clause_to,
c_parser_omp_clause_from, c_parser_omp_clause_uniform): New functions.
(c_parser_omp_all_clauses): Add finish_p argument. Don't call
c_finish_omp_clauses if it is false. Handle new OpenMP 4.0 clauses.
(c_parser_omp_atomic): Parse seq_cst clause, pass true if it is
present to c_finish_omp_atomic. Handle OpenMP 4.0 atomic forms.
(c_parser_omp_for_loop): Add CODE argument, pass it through
to c_finish_omp_for. Change last argument to cclauses,
and adjust uses to grab parallel clauses from the array of all
the split clauses. Adjust c_parser_binary_expression,
c_parser_declaration_or_fndef and c_finish_omp_for callers.
(omp_split_clauses): New function.
(c_parser_omp_simd): New function.
(c_parser_omp_for): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs,
and call c_parser_omp_simd when parsing for simd.
(c_parser_omp_sections_scope): If section-sequence doesn't start with
#pragma omp section, require exactly one structured-block instead of
a sequence of statements.
(c_parser_omp_sections): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs.
(c_parser_omp_parallel): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined
constructs.
(c_parser_omp_taskgroup, c_parser_omp_cancel,
c_parser_omp_cancellation_point, c_parser_omp_distribute,
c_parser_omp_teams, c_parser_omp_target_data,
c_parser_omp_target_update, c_parser_omp_target,
c_parser_omp_declare_simd, c_finish_omp_declare_simd,
c_parser_omp_declare_target, c_parser_omp_end_declare_target,
c_parser_omp_declare_reduction, c_parser_omp_declare): New functions.
(c_parser_omp_construct): Add p_name and mask vars. Handle
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS. Adjust c_parser_omp_for, c_parser_omp_parallel
and c_parser_omp_sections callers.
(c_parse_file): Initialize tparser.tokens and the_parser->tokens here.
(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
(OMP_PARALLEL_CLAUSE_MASK): Likewise. Add OMP_CLAUSE_PROC_BIND.
(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1. Add
OMP_CLAUSE_DEPEND.
(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK,
OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK,
OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK,
OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK,
OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
* c-typeck.c: Include tree-inline.h.
(c_finish_omp_cancel, c_finish_omp_cancellation_point,
handle_omp_array_sections_1, handle_omp_array_sections,
c_clone_omp_udr, c_find_omp_placeholder_r): New functions.
(c_finish_omp_clauses): Handle new OpenMP 4.0 clauses and
user defined reductions.
(c_tree_equal): New function.
* c-tree.h (temp_store_parm_decls, temp_pop_parm_decls,
c_finish_omp_cancel, c_finish_omp_cancellation_point, c_tree_equal,
c_omp_reduction_id, c_omp_reduction_decl, c_omp_reduction_lookup,
c_check_omp_declare_reduction_r): New prototypes.
* c-decl.c (current_omp_declare_target_attribute): New variable.
(c_decl_attributes): New function.
(start_decl, start_function): Use it instead of decl_attributes.
(temp_store_parm_decls, temp_pop_parm_decls, c_omp_reduction_id,
c_omp_reduction_decl, c_omp_reduction_lookup,
c_check_omp_declare_reduction_r): New functions (a declare reduction /
declare simd sketch follows these entries).
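The following sketch shows the user defined reduction and declare simd syntax
that the new gcc/c parsing routines above handle; it is illustrative only, and
struct point, addpt, dist and vsum are invented names.

/* Illustrative sketch, not part of the commit.  */
#include <math.h>

struct point { double x, y; };

/* User defined reduction: combiner expression plus initializer, parsed by
   c_parser_omp_declare_reduction / c_check_omp_declare_reduction_r.  */
#pragma omp declare reduction (addpt : struct point : \
        omp_out.x += omp_in.x, omp_out.y += omp_in.y) \
        initializer (omp_priv = { 0.0, 0.0 })

/* Elemental function, handled by c_parser_omp_declare_simd.  */
#pragma omp declare simd notinbranch
double
dist (double x, double y)
{
  return sqrt (x * x + y * y);
}

struct point
vsum (struct point *pts, double *d, int n)
{
  struct point s = { 0.0, 0.0 };
  int i;

  #pragma omp parallel for reduction (addpt : s)
  for (i = 0; i < n; i++)
    {
      s.x += pts[i].x;
      s.y += pts[i].y;
    }

  #pragma omp simd
  for (i = 0; i < n; i++)
    d[i] = dist (pts[i].x, pts[i].y);  /* Can use the simd clone of dist.  */

  return s;
}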
gcc/cp/
* decl.c (duplicate_decls): Error out for redeclaration of UDRs.
(declare_simd_adjust_this): New function.
(grokfndecl): If "omp declare simd" attribute is present,
call declare_simd_adjust_this if needed and
c_omp_declare_simd_clauses_to_numbers.
* cp-array-notation.c (expand_array_notation_exprs): Handle
OMP_TASKGROUP.
* cp-gimplify.c (cp_gimplify_expr): Handle OMP_SIMD and
OMP_DISTRIBUTE. Handle is_invisiref_parm decls in
OMP_CLAUSE_REDUCTION.
(cp_genericize_r): Handle OMP_SIMD and OMP_DISTRIBUTE like
OMP_FOR.
(cxx_omp_privatize_by_reference): Return true for
is_invisiref_parm decls.
(cxx_omp_finish_clause): Adjust cxx_omp_create_clause_info
caller.
* pt.c (apply_late_template_attributes): For "omp declare simd"
attribute call tsubst_omp_clauses,
c_omp_declare_simd_clauses_to_decls, finish_omp_clauses
and c_omp_declare_simd_clauses_to_numbers.
(instantiate_class_template_1): Call cp_check_omp_declare_reduction
for UDRs.
(tsubst_decl): Handle UDRs.
(tsubst_omp_clauses): Add declare_simd argument, if true don't
call finish_omp_clauses. Handle new OpenMP 4.0 clauses.
Handle non-NULL OMP_CLAUSE_REDUCTION_PLACEHOLDER on
OMP_CLAUSE_REDUCTION.
(tsubst_expr): For UDRs call pushdecl and
cp_check_omp_declare_reduction. Adjust tsubst_omp_clauses
callers. Handle OMP_SIMD, OMP_DISTRIBUTE, OMP_TEAMS,
OMP_TARGET_DATA, OMP_TARGET_UPDATE, OMP_TARGET, OMP_TASKGROUP.
Adjust finish_omp_atomic caller.
(tsubst_omp_udr): New function.
(instantiate_decl): For UDRs at block scope, don't call
start_preparsed_function/finish_function. Call tsubst_omp_udr.
* semantics.c (cxx_omp_create_clause_info): Add need_dtor argument,
use it instead of need_default_ctor || need_copy_ctor.
(struct cp_check_omp_declare_reduction_data): New type.
(handle_omp_array_sections_1, handle_omp_array_sections,
omp_reduction_id, omp_reduction_lookup,
cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction_r,
cp_check_omp_declare_reduction, clone_omp_udr,
find_omp_placeholder_r, finish_omp_reduction_clause): New functions.
(finish_omp_clauses): Handle new OpenMP 4.0 clauses and user defined
reductions.
(finish_omp_for): Add CODE argument, use it instead of hardcoded
OMP_FOR. Adjust c_finish_omp_for caller.
(finish_omp_atomic): Add seq_cst argument, adjust
c_finish_omp_atomic callers, handle seq_cst and new OpenMP 4.0
atomic variants.
(finish_omp_cancel, finish_omp_cancellation_point): New functions
(a cancellation sketch follows the gcc/cp/ entries).
* decl2.c (mark_used): Force immediate instantiation of
DECL_OMP_DECLARE_REDUCTION_P decls.
(is_late_template_attribute): Return true for "omp declare simd"
attribute.
(cp_omp_mappable_type): New function.
(cplus_decl_attributes): Add implicit "omp declare target" attribute
if requested.
* parser.c (cp_debug_parser): Print
parser->colon_doesnt_start_class_def_p.
(cp_ensure_no_omp_declare_simd, cp_finalize_omp_declare_simd): New
functions.
(enum pragma_context): Add pragma_member and pragma_objc_icode.
(cp_parser_binary_expression): Handle no_toplevel_fold_p
even for binary operations other than comparison.
(cp_parser_linkage_specification): Call
cp_ensure_no_omp_declare_simd if needed.
(cp_parser_namespace_definition): Likewise.
(cp_parser_init_declarator): Call cp_finalize_omp_declare_simd.
(cp_parser_direct_declarator): Pass declarator to
cp_parser_late_return_type_opt.
(cp_parser_late_return_type_opt): Add declarator argument,
call cp_parser_late_parsing_omp_declare_simd for declare simd.
(cp_parser_class_specifier_1): Call cp_ensure_no_omp_declare_simd.
Parse UDRs before all other methods.
(cp_parser_member_specification_opt): Use pragma_member instead of
pragma_external.
(cp_parser_member_declaration): Call cp_finalize_omp_declare_simd.
(cp_parser_function_definition_from_specifiers_and_declarator,
cp_parser_save_member_function_body): Likewise.
(cp_parser_late_parsing_for_member): Handle UDRs specially.
(cp_parser_next_token_starts_class_definition_p): Don't allow
CPP_COLON if colon_doesnt_start_class_def_p flag is true.
(cp_parser_objc_interstitial_code): Use pragma_objc_icode
instead of pragma_external.
(cp_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
(cp_parser_omp_var_list_no_open): Parse array sections for
OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses. Add COLON argument,
if non-NULL, allow parsing to end with a colon rather than close
paren.
(cp_parser_omp_var_list): Adjust cp_parser_omp_var_list_no_open
caller.
(cp_parser_omp_clause_reduction): Handle user defined reductions.
(cp_parser_omp_clause_branch, cp_parser_omp_clause_cancelkind,
cp_parser_omp_clause_num_teams, cp_parser_omp_clause_thread_limit,
cp_parser_omp_clause_aligned, cp_parser_omp_clause_linear,
cp_parser_omp_clause_safelen, cp_parser_omp_clause_simdlen,
cp_parser_omp_clause_depend, cp_parser_omp_clause_map,
cp_parser_omp_clause_device, cp_parser_omp_clause_dist_schedule,
cp_parser_omp_clause_proc_bind, cp_parser_omp_clause_to,
cp_parser_omp_clause_from, cp_parser_omp_clause_uniform): New
functions.
(cp_parser_omp_all_clauses): Add finish_p argument. Don't call
finish_omp_clauses if it is false. Handle new OpenMP 4.0 clauses.
(cp_parser_omp_atomic): Parse seq_cst clause, pass
true if it is present to finish_omp_atomic. Handle new OpenMP 4.0
atomic forms.
(cp_parser_omp_for_loop): Add CODE argument, pass it through
to finish_omp_for. Change last argument to cclauses,
and adjust uses to grab parallel clauses from the array of all
the split clauses.
(cp_omp_split_clauses): New function.
(cp_parser_omp_simd): New function.
(cp_parser_omp_for): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs,
and call c_parser_omp_simd when parsing for simd.
(cp_parser_omp_sections_scope): If section-sequence doesn't start with
#pragma omp section, require exactly one structured-block instead of
a sequence of statements.
(cp_parser_omp_sections): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs.
(cp_parser_omp_parallel): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined
constructs.
(cp_parser_omp_taskgroup, cp_parser_omp_cancel,
cp_parser_omp_cancellation_point, cp_parser_omp_distribute,
cp_parser_omp_teams, cp_parser_omp_target_data,
cp_parser_omp_target_update, cp_parser_omp_target,
cp_parser_omp_declare_simd, cp_parser_late_parsing_omp_declare_simd,
cp_parser_omp_declare_target, cp_parser_omp_end_declare_target,
cp_parser_omp_declare_reduction_exprs, cp_parser_omp_declare_reduction,
cp_parser_omp_declare): New functions.
(cp_parser_omp_construct): Add p_name and mask vars. Handle
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS. Adjust cp_parser_omp_for, cp_parser_omp_parallel
and cp_parser_omp_sections callers.
(cp_parser_pragma): Handle PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION,
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS, PRAGMA_OMP_TARGET, PRAGMA_OMP_END_DECLARE_TARGET.
Handle pragma_member and pragma_objc_icode like pragma_external.
(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
(OMP_PARALLEL_CLAUSE_MASK): Likewise. Add OMP_CLAUSE_PROC_BIND.
(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1. Add
OMP_CLAUSE_DEPEND.
(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK,
OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK,
OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK,
OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK,
OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
* parser.h (struct cp_omp_declare_simd_data): New type.
(struct cp_parser): Add colon_doesnt_start_class_def_p and
omp_declare_simd fields.
* cp-objcp-common.h (LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
* cp-tree.h (struct lang_decl_fn): Add omp_declare_reduction_p
bit.
(DECL_OMP_DECLARE_REDUCTION_P): Define.
(OMP_FOR_GIMPLIFYING_P): Use OMP_LOOP_CHECK macro.
(struct saved_scope): Add omp_declare_target_attribute field.
(cp_omp_mappable_type, omp_reduction_id,
cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction,
finish_omp_cancel, finish_omp_cancellation_point): New prototypes.
(finish_omp_for): Add CODE argument.
(finish_omp_atomic): Add seq_cst argument.
(cxx_omp_create_clause_info): Add need_dtor argument.
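Finally, a cancellation sketch; it is not from the commit and find_first is an
invented example. The same #pragma omp cancel / cancellation point handling is
provided for C++ by the parser.c and semantics.c changes above, and it only
takes effect when OMP_CANCELLATION=true at run time.

/* Illustrative sketch, not part of the commit.  */
int
find_first (const int *a, int n, int what)
{
  int i, found = -1;

  #pragma omp parallel for
  for (i = 0; i < n; i++)
    {
      if (a[i] == what)
        {
          #pragma omp critical
          if (found < 0 || i < found)
            found = i;
          /* Request cancellation of the innermost enclosing "for" region;
             lowered to GOMP_cancel / GOMP_loop_end_cancel.  */
          #pragma omp cancel for
        }
      /* Other threads notice the cancellation here.  */
      #pragma omp cancellation point for
    }
  return found;
}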
gcc/testsuite/
* c-c++-common/gomp/atomic-15.c: Adjust for C diagnostics.
Remove error test that is now valid in OpenMP 4.0.
* c-c++-common/gomp/atomic-16.c: New test.
* c-c++-common/gomp/cancel-1.c: New test.
* c-c++-common/gomp/depend-1.c: New test.
* c-c++-common/gomp/depend-2.c: New test.
* c-c++-common/gomp/map-1.c: New test.
* c-c++-common/gomp/pr58472.c: New test.
* c-c++-common/gomp/sections1.c: New test.
* c-c++-common/gomp/simd1.c: New test.
* c-c++-common/gomp/simd2.c: New test.
* c-c++-common/gomp/simd3.c: New test.
* c-c++-common/gomp/simd4.c: New test.
* c-c++-common/gomp/simd5.c: New test.
* c-c++-common/gomp/single1.c: New test.
* g++.dg/gomp/block-0.C: Adjust for stricter #pragma omp sections
parser.
* g++.dg/gomp/block-3.C: Likewise.
* g++.dg/gomp/clause-3.C: Adjust error messages.
* g++.dg/gomp/declare-simd-1.C: New test.
* g++.dg/gomp/declare-simd-2.C: New test.
* g++.dg/gomp/depend-1.C: New test.
* g++.dg/gomp/depend-2.C: New test.
* g++.dg/gomp/target-1.C: New test.
* g++.dg/gomp/target-2.C: New test.
* g++.dg/gomp/taskgroup-1.C: New test.
* g++.dg/gomp/teams-1.C: New test.
* g++.dg/gomp/udr-1.C: New test.
* g++.dg/gomp/udr-2.C: New test.
* g++.dg/gomp/udr-3.C: New test.
* g++.dg/gomp/udr-4.C: New test.
* g++.dg/gomp/udr-5.C: New test.
* g++.dg/gomp/udr-6.C: New test.
* gcc.dg/autopar/outer-1.c: Expect 4 instead of 5 loopfn matches.
* gcc.dg/autopar/outer-2.c: Likewise.
* gcc.dg/autopar/outer-3.c: Likewise.
* gcc.dg/autopar/outer-4.c: Likewise.
* gcc.dg/autopar/outer-5.c: Likewise.
* gcc.dg/autopar/outer-6.c: Likewise.
* gcc.dg/autopar/parallelization-1.c: Likewise.
* gcc.dg/gomp/block-3.c: Adjust for stricter #pragma omp sections
parser.
* gcc.dg/gomp/clause-1.c: Adjust error messages.
* gcc.dg/gomp/combined-1.c: Look for GOMP_parallel_loop_runtime
instead of GOMP_parallel_loop_runtime_start.
* gcc.dg/gomp/declare-simd-1.c: New test.
* gcc.dg/gomp/declare-simd-2.c: New test.
* gcc.dg/gomp/nesting-1.c: Adjust for stricter #pragma omp sections
parser. Add further #pragma omp sections nesting tests.
* gcc.dg/gomp/target-1.c: New test.
* gcc.dg/gomp/target-2.c: New test.
* gcc.dg/gomp/taskgroup-1.c: New test.
* gcc.dg/gomp/teams-1.c: New test.
* gcc.dg/gomp/udr-1.c: New test.
* gcc.dg/gomp/udr-2.c: New test.
* gcc.dg/gomp/udr-3.c: New test.
* gcc.dg/gomp/udr-4.c: New test.
* gfortran.dg/gomp/appendix-a/a.35.5.f90: Add dg-error.
Co-Authored-By: Richard Henderson <rth@redhat.com>
Co-Authored-By: Tobias Burnus <burnus@net-b.de>
From-SVN: r203408
Diffstat (limited to 'libgomp/testsuite/libgomp.c')
43 files changed, 3869 insertions, 2 deletions
diff --git a/libgomp/testsuite/libgomp.c/affinity-1.c b/libgomp/testsuite/libgomp.c/affinity-1.c new file mode 100644 index 0000000..5d3e45d --- /dev/null +++ b/libgomp/testsuite/libgomp.c/affinity-1.c @@ -0,0 +1,1146 @@ +/* Affinity tests. + Copyright (C) 2013 Free Software Foundation, Inc. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_PROC_BIND "false" } */ +/* { dg-additional-options "-DINTERPOSE_GETAFFINITY -DDO_FORK -ldl" { target *-*-linux* } } */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include "config.h" +#include <alloca.h> +#include <omp.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#ifdef DO_FORK +#include <signal.h> +#endif +#ifdef HAVE_PTHREAD_AFFINITY_NP +#include <sched.h> +#include <pthread.h> +#ifdef INTERPOSE_GETAFFINITY +#include <dlfcn.h> +#endif +#endif + +struct place +{ + int start, len; +}; +struct places +{ + char name[40]; + int count; + struct place places[8]; +} places_array[] = { + { "", 1, { { -1, -1 } } }, + { "{0}:8", 8, + { { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 }, + { 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1 } } }, + { "{7,6}:2:-3", 2, { { 6, 2 }, { 3, 2 } } }, + { "{6,7}:4:-2,!{2,3}", 3, { { 6, 2 }, { 4, 2 }, { 0, 2 } } }, + { "{1}:7:1", 7, + { { 1, 1 }, { 2, 1 }, { 3, 1 }, + { 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1 } } }, + { "{0,1},{3,2,4},{6,5,!6},{6},{7:2:-1,!6}", 5, + { { 0, 2 }, { 2, 3 }, { 5, 1 }, { 6, 1 }, { 7, 1 } } } +}; + +unsigned long contig_cpucount; +unsigned long min_cpusetsize; + +#if defined (HAVE_PTHREAD_AFFINITY_NP) && defined (_SC_NPROCESSORS_CONF) \ + && defined (CPU_ALLOC_SIZE) + +#if defined (RTLD_NEXT) && defined (INTERPOSE_GETAFFINITY) +int (*orig_getaffinity_np) (pthread_t, size_t, cpu_set_t *); + +int +pthread_getaffinity_np (pthread_t thread, size_t cpusetsize, cpu_set_t *cpuset) +{ + int ret; + unsigned long i, max; + if (orig_getaffinity_np == NULL) + { + orig_getaffinity_np = (int (*) (pthread_t, size_t, cpu_set_t *)) + dlsym (RTLD_NEXT, "pthread_getaffinity_np"); + if (orig_getaffinity_np == NULL) + exit (0); + } + ret = orig_getaffinity_np (thread, cpusetsize, cpuset); + if (ret != 0) + return ret; + if (contig_cpucount == 0) + { + max = 8 * cpusetsize; + for (i = 0; i < max; i++) + if (!CPU_ISSET_S (i, cpusetsize, cpuset)) + break; + contig_cpucount = i; + min_cpusetsize = cpusetsize; + } + return ret; +} +#endif + +void +print_affinity (struct place p) +{ + static unsigned long size; + if (size == 0) + { + if (min_cpusetsize) + size = min_cpusetsize; + else + { + size = sysconf (_SC_NPROCESSORS_CONF); + size = CPU_ALLOC_SIZE (size); + if (size < sizeof (cpu_set_t)) + size = sizeof (cpu_set_t); + } + } + cpu_set_t *cpusetp = (cpu_set_t *) alloca (size); + if (pthread_getaffinity_np (pthread_self (), size, cpusetp) == 0) + { + unsigned long i, len, max = 8 * size; + int notfirst = 0, unexpected = 1; + + printf (" bound to {"); + for (i = 0, len = 0; i < max; i++) 
+ if (CPU_ISSET_S (i, size, cpusetp)) + { + if (len == 0) + { + if (notfirst) + { + unexpected = 1; + printf (","); + } + else if (i == (unsigned long) p.start) + unexpected = 0; + notfirst = 1; + printf ("%lu", i); + } + ++len; + } + else + { + if (len && len != (unsigned long) p.len) + unexpected = 1; + if (len > 1) + printf (":%lu", len); + len = 0; + } + if (len && len != (unsigned long) p.len) + unexpected = 1; + if (len > 1) + printf (":%lu", len); + printf ("}"); + if (p.start != -1 && unexpected) + { + printf (", expected {%d", p.start); + if (p.len != 1) + printf (":%d", p.len); + printf ("} instead"); + } + else if (p.start != -1) + printf (", verified"); + } +} +#else +void +print_affinity (struct place p) +{ + (void) p.start; + (void) p.len; +} +#endif + + +int +main () +{ + char *env_proc_bind = getenv ("OMP_PROC_BIND"); + int test_false = env_proc_bind && strcmp (env_proc_bind, "false") == 0; + int test_true = env_proc_bind && strcmp (env_proc_bind, "true") == 0; + int test_spread_master_close + = env_proc_bind && strcmp (env_proc_bind, "spread,master,close") == 0; + char *env_places = getenv ("OMP_PLACES"); + int test_places = 0; + +#ifdef DO_FORK + if (env_places == NULL && contig_cpucount >= 8 && test_false + && getenv ("GOMP_AFFINITY") == NULL) + { + int i, j, status; + pid_t pid; + for (j = 0; j < 2; j++) + { + if (setenv ("OMP_PROC_BIND", j ? "spread,master,close" : "true", 1) + < 0) + break; + for (i = sizeof (places_array) / sizeof (places_array[0]) - 1; + i; --i) + { + if (setenv ("OMP_PLACES", places_array[i].name, 1) < 0) + break; + pid = fork (); + if (pid == -1) + break; + if (pid == 0) + { + execl ("/proc/self/exe", "affinity-1.exe", NULL); + _exit (1); + } + if (waitpid (pid, &status, 0) < 0) + break; + if (WIFSIGNALED (status) && WTERMSIG (status) == SIGABRT) + abort (); + else if (!WIFEXITED (status) || WEXITSTATUS (status) != 0) + break; + } + if (i) + break; + } + } +#endif + + int first = 1; + if (env_proc_bind) + { + printf ("OMP_PROC_BIND='%s'", env_proc_bind); + first = 0; + } + if (env_places) + printf ("%sOMP_PLACES='%s'", first ? 
"" : " ", env_places); + printf ("\n"); + + if (env_places && contig_cpucount >= 8 + && (test_true || test_spread_master_close)) + { + for (test_places = sizeof (places_array) / sizeof (places_array[0]) - 1; + test_places; --test_places) + if (strcmp (env_places, places_array[test_places].name) == 0) + break; + } + +#define verify(if_true, if_s_m_c) \ + if (test_false && omp_get_proc_bind () != omp_proc_bind_false) \ + abort (); \ + if (test_true && omp_get_proc_bind () != if_true) \ + abort (); \ + if (test_spread_master_close && omp_get_proc_bind () != if_s_m_c) \ + abort (); + + verify (omp_proc_bind_true, omp_proc_bind_spread); + + printf ("Initial thread"); + print_affinity (places_array[test_places].places[0]); + printf ("\n"); + omp_set_nested (1); + omp_set_dynamic (0); + + #pragma omp parallel if (0) + { + verify (omp_proc_bind_true, omp_proc_bind_master); + #pragma omp parallel if (0) + { + verify (omp_proc_bind_true, omp_proc_bind_close); + #pragma omp parallel if (0) + { + verify (omp_proc_bind_true, omp_proc_bind_close); + } + #pragma omp parallel if (0) proc_bind (spread) + { + verify (omp_proc_bind_spread, omp_proc_bind_spread); + } + } + #pragma omp parallel if (0) proc_bind (master) + { + verify (omp_proc_bind_master, omp_proc_bind_close); + #pragma omp parallel if (0) + { + verify (omp_proc_bind_master, omp_proc_bind_close); + } + #pragma omp parallel if (0) proc_bind (spread) + { + verify (omp_proc_bind_spread, omp_proc_bind_spread); + } + } + } + + /* True/spread */ + #pragma omp parallel num_threads (4) + { + verify (omp_proc_bind_true, omp_proc_bind_master); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#1 thread %d", thr); + if (omp_get_num_threads () == 4 && test_spread_master_close) + switch (places_array[test_places].count) + { + case 8: + /* T = 4, P = 8, each subpartition has 2 places. */ + case 7: + /* T = 4, P = 7, each subpartition has 2 places, but + last partition, which has just one place. */ + p = places_array[test_places].places[2 * thr]; + break; + case 5: + /* T = 4, P = 5, first subpartition has 2 places, the + rest just one. */ + p = places_array[test_places].places[thr ? 1 + thr : 0]; + break; + case 3: + /* T = 4, P = 3, unit sized subpartitions, first gets + thr0 and thr3, second thr1, third thr2. */ + p = places_array[test_places].places[thr == 3 ? 0 : thr]; + break; + case 2: + /* T = 4, P = 2, unit sized subpartitions, each with + 2 threads. */ + p = places_array[test_places].places[thr / 2]; + break; + } + print_affinity (p); + printf ("\n"); + } + #pragma omp barrier + if (omp_get_thread_num () == 3) + { + /* True/spread, true/master. */ + #pragma omp parallel num_threads (3) + { + verify (omp_proc_bind_true, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#1,#1 thread 3,%d", thr); + if (omp_get_num_threads () == 3 && test_spread_master_close) + /* Outer is spread, inner master, so just bind to the + place or the master thread, which is thr 3 above. */ + switch (places_array[test_places].count) + { + case 8: + case 7: + p = places_array[test_places].places[6]; + break; + case 5: + p = places_array[test_places].places[4]; + break; + case 3: + p = places_array[test_places].places[0]; + break; + case 2: + p = places_array[test_places].places[1]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + /* True/spread, spread. 
*/ + #pragma omp parallel num_threads (5) proc_bind (spread) + { + verify (omp_proc_bind_spread, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#1,#2 thread 3,%d", thr); + if (omp_get_num_threads () == 5 && test_spread_master_close) + /* Outer is spread, inner spread. */ + switch (places_array[test_places].count) + { + case 8: + /* T = 5, P = 2, unit sized subpartitions. */ + p = places_array[test_places].places[thr == 4 ? 6 + : 6 + thr / 2]; + break; + /* The rest are T = 5, P = 1. */ + case 7: + p = places_array[test_places].places[6]; + break; + case 5: + p = places_array[test_places].places[4]; + break; + case 3: + p = places_array[test_places].places[0]; + break; + case 2: + p = places_array[test_places].places[1]; + break; + } + print_affinity (p); + printf ("\n"); + } + #pragma omp barrier + if (omp_get_thread_num () == 3) + { + /* True/spread, spread, close. */ + #pragma omp parallel num_threads (5) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#1,#2,#1 thread 3,3,%d", thr); + if (omp_get_num_threads () == 5 && test_spread_master_close) + /* Outer is spread, inner spread, innermost close. */ + switch (places_array[test_places].count) + { + /* All are T = 5, P = 1. */ + case 8: + p = places_array[test_places].places[7]; + break; + case 7: + p = places_array[test_places].places[6]; + break; + case 5: + p = places_array[test_places].places[4]; + break; + case 3: + p = places_array[test_places].places[0]; + break; + case 2: + p = places_array[test_places].places[1]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + } + /* True/spread, master. */ + #pragma omp parallel num_threads (4) proc_bind(master) + { + verify (omp_proc_bind_master, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#1,#3 thread 3,%d", thr); + if (omp_get_num_threads () == 4 && test_spread_master_close) + /* Outer is spread, inner master, so just bind to the + place or the master thread, which is thr 3 above. */ + switch (places_array[test_places].count) + { + case 8: + case 7: + p = places_array[test_places].places[6]; + break; + case 5: + p = places_array[test_places].places[4]; + break; + case 3: + p = places_array[test_places].places[0]; + break; + case 2: + p = places_array[test_places].places[1]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + /* True/spread, close. */ + #pragma omp parallel num_threads (6) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#1,#4 thread 3,%d", thr); + if (omp_get_num_threads () == 6 && test_spread_master_close) + /* Outer is spread, inner close. */ + switch (places_array[test_places].count) + { + case 8: + /* T = 6, P = 2, unit sized subpartitions. */ + p = places_array[test_places].places[6 + thr / 3]; + break; + /* The rest are T = 6, P = 1. */ + case 7: + p = places_array[test_places].places[6]; + break; + case 5: + p = places_array[test_places].places[4]; + break; + case 3: + p = places_array[test_places].places[0]; + break; + case 2: + p = places_array[test_places].places[1]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + } + + /* Spread. 
*/
+ #pragma omp parallel num_threads (5) proc_bind(spread)
+ {
+ verify (omp_proc_bind_spread, omp_proc_bind_master);
+ #pragma omp critical
+ {
+ struct place p = places_array[0].places[0];
+ int thr = omp_get_thread_num ();
+ printf ("#2 thread %d", thr);
+ if (omp_get_num_threads () == 5
+ && (test_spread_master_close || test_true))
+ switch (places_array[test_places].count)
+ {
+ case 8:
+ /* T = 5, P = 8, first 3 subpartitions have 2 places, last
+ 2 one place. */
+ p = places_array[test_places].places[thr < 3 ? 2 * thr : 3 + thr];
+ break;
+ case 7:
+ /* T = 5, P = 7, first 2 subpartitions have 2 places, last
+ 3 one place. */
+ p = places_array[test_places].places[thr < 2 ? 2 * thr : 2 + thr];
+ break;
+ case 5:
+ /* T = 5, P = 5, unit sized subpartitions, each one with one
+ thread. */
+ p = places_array[test_places].places[thr];
+ break;
+ case 3:
+ /* T = 5, P = 3, unit sized subpartitions, first gets
+ thr0 and thr3, second thr1 and thr4, third thr2. */
+ p = places_array[test_places].places[thr >= 3 ? thr - 3 : thr];
+ break;
+ case 2:
+ /* T = 5, P = 2, unit sized subpartitions, first with
+ thr{0,1,4} and second with thr{2,3}. */
+ p = places_array[test_places].places[thr == 4 ? 0 : thr / 2];
+ break;
+ }
+ print_affinity (p);
+ printf ("\n");
+ }
+ #pragma omp barrier
+ if (omp_get_thread_num () == 3)
+ {
+ int pp = 0;
+ switch (places_array[test_places].count)
+ {
+ case 8: pp = 6; break;
+ case 7: pp = 5; break;
+ case 5: pp = 3; break;
+ case 2: pp = 1; break;
+ }
+ /* Spread, spread/master. */
+ #pragma omp parallel num_threads (3) firstprivate (pp)
+ {
+ verify (omp_proc_bind_spread, omp_proc_bind_close);
+ #pragma omp critical
+ {
+ struct place p = places_array[0].places[0];
+ int thr = omp_get_thread_num ();
+ printf ("#2,#1 thread 3,%d", thr);
+ if (test_spread_master_close || test_true)
+ /* Outer is spread, inner spread resp. master, but we have
+ just unit sized partitions. */
+ p = places_array[test_places].places[pp];
+ print_affinity (p);
+ printf ("\n");
+ }
+ }
+ /* Spread, spread. */
+ #pragma omp parallel num_threads (5) proc_bind (spread) \
+ firstprivate (pp)
+ {
+ verify (omp_proc_bind_spread, omp_proc_bind_close);
+ #pragma omp critical
+ {
+ struct place p = places_array[0].places[0];
+ int thr = omp_get_thread_num ();
+ printf ("#2,#2 thread 3,%d", thr);
+ if (test_spread_master_close || test_true)
+ /* Outer is spread, inner spread, but we have
+ just unit sized partitions. */
+ p = places_array[test_places].places[pp];
+ print_affinity (p);
+ printf ("\n");
+ }
+ }
+ /* Spread, master. */
+ #pragma omp parallel num_threads (4) proc_bind(master) \
+ firstprivate(pp)
+ {
+ verify (omp_proc_bind_master, omp_proc_bind_close);
+ #pragma omp critical
+ {
+ struct place p = places_array[0].places[0];
+ int thr = omp_get_thread_num ();
+ printf ("#2,#3 thread 3,%d", thr);
+ if (test_spread_master_close || test_true)
+ /* Outer is spread, inner master, but we have
+ just unit sized partitions. */
+ p = places_array[test_places].places[pp];
+ print_affinity (p);
+ printf ("\n");
+ }
+ }
+ /* Spread, close. */
+ #pragma omp parallel num_threads (6) proc_bind (close) \
+ firstprivate (pp)
+ {
+ verify (omp_proc_bind_close, omp_proc_bind_close);
+ #pragma omp critical
+ {
+ struct place p = places_array[0].places[0];
+ int thr = omp_get_thread_num ();
+ printf ("#2,#4 thread 3,%d", thr);
+ if (test_spread_master_close || test_true)
+ /* Outer is spread, inner close, but we have
+ just unit sized partitions. 
*/ + p = places_array[test_places].places[pp]; + print_affinity (p); + printf ("\n"); + } + } + } + } + + /* Master. */ + #pragma omp parallel num_threads (3) proc_bind(master) + { + verify (omp_proc_bind_master, omp_proc_bind_master); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#3 thread %d", thr); + if (test_spread_master_close || test_true) + p = places_array[test_places].places[0]; + print_affinity (p); + printf ("\n"); + } + #pragma omp barrier + if (omp_get_thread_num () == 2) + { + /* Master, master. */ + #pragma omp parallel num_threads (4) + { + verify (omp_proc_bind_master, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#3,#1 thread 2,%d", thr); + if (test_spread_master_close || test_true) + /* Outer is master, inner is master. */ + p = places_array[test_places].places[0]; + print_affinity (p); + printf ("\n"); + } + } + /* Master, spread. */ + #pragma omp parallel num_threads (4) proc_bind (spread) + { + verify (omp_proc_bind_spread, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#3,#2 thread 2,%d", thr); + if (omp_get_num_threads () == 4 + && (test_spread_master_close || test_true)) + /* Outer is master, inner is spread. */ + switch (places_array[test_places].count) + { + case 8: + /* T = 4, P = 8, each subpartition has 2 places. */ + case 7: + /* T = 4, P = 7, each subpartition has 2 places, but + last partition, which has just one place. */ + p = places_array[test_places].places[2 * thr]; + break; + case 5: + /* T = 4, P = 5, first subpartition has 2 places, the + rest just one. */ + p = places_array[test_places].places[thr ? 1 + thr : 0]; + break; + case 3: + /* T = 4, P = 3, unit sized subpartitions, first gets + thr0 and thr3, second thr1, third thr2. */ + p = places_array[test_places].places[thr == 3 ? 0 : thr]; + break; + case 2: + /* T = 4, P = 2, unit sized subpartitions, each with + 2 threads. */ + p = places_array[test_places].places[thr / 2]; + break; + } + print_affinity (p); + printf ("\n"); + } + #pragma omp barrier + if (omp_get_thread_num () == 0) + { + /* Master, spread, close. */ + #pragma omp parallel num_threads (5) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#3,#2,#1 thread 2,0,%d", thr); + if (omp_get_num_threads () == 5 + && (test_spread_master_close || test_true)) + /* Outer is master, inner spread, innermost close. */ + switch (places_array[test_places].count) + { + /* First 3 are T = 5, P = 2. */ + case 8: + case 7: + case 5: + p = places_array[test_places].places[(thr & 2) / 2]; + break; + /* All the rest are T = 5, P = 1. */ + case 3: + case 2: + p = places_array[test_places].places[0]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + #pragma omp barrier + if (omp_get_thread_num () == 3) + { + /* Master, spread, close. */ + #pragma omp parallel num_threads (5) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#3,#2,#2 thread 2,3,%d", thr); + if (omp_get_num_threads () == 5 + && (test_spread_master_close || test_true)) + /* Outer is master, inner spread, innermost close. 
*/ + switch (places_array[test_places].count) + { + case 8: + /* T = 5, P = 2. */ + p = places_array[test_places].places[6 + + (thr & 2) / 2]; + break; + /* All the rest are T = 5, P = 1. */ + case 7: + p = places_array[test_places].places[6]; + break; + case 5: + p = places_array[test_places].places[4]; + break; + case 3: + p = places_array[test_places].places[0]; + break; + case 2: + p = places_array[test_places].places[1]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + } + /* Master, master. */ + #pragma omp parallel num_threads (4) proc_bind(master) + { + verify (omp_proc_bind_master, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#3,#3 thread 2,%d", thr); + if (test_spread_master_close || test_true) + /* Outer is master, inner master. */ + p = places_array[test_places].places[0]; + print_affinity (p); + printf ("\n"); + } + } + /* Master, close. */ + #pragma omp parallel num_threads (6) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#3,#4 thread 2,%d", thr); + if (omp_get_num_threads () == 6 + && (test_spread_master_close || test_true)) + switch (places_array[test_places].count) + { + case 8: + /* T = 6, P = 8. */ + case 7: + /* T = 6, P = 7. */ + p = places_array[test_places].places[thr]; + break; + case 5: + /* T = 6, P = 5. thr{0,5} go into the first place. */ + p = places_array[test_places].places[thr == 5 ? 0 : thr]; + break; + case 3: + /* T = 6, P = 3, two threads into each place. */ + p = places_array[test_places].places[thr / 2]; + break; + case 2: + /* T = 6, P = 2, 3 threads into each place. */ + p = places_array[test_places].places[thr / 3]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + } + + #pragma omp parallel num_threads (5) proc_bind(close) + { + verify (omp_proc_bind_close, omp_proc_bind_master); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#4 thread %d", thr); + if (omp_get_num_threads () == 5 + && (test_spread_master_close || test_true)) + switch (places_array[test_places].count) + { + case 8: + /* T = 5, P = 8. */ + case 7: + /* T = 5, P = 7. */ + case 5: + /* T = 5, P = 5. */ + p = places_array[test_places].places[thr]; + break; + case 3: + /* T = 5, P = 3, thr{0,3} in first place, thr{1,4} in second, + thr2 in third. */ + p = places_array[test_places].places[thr >= 3 ? thr - 3 : thr]; + break; + case 2: + /* T = 5, P = 2, thr{0,1,4} in first place, thr{2,3} in second. */ + p = places_array[test_places].places[thr == 4 ? 0 : thr / 2]; + break; + } + print_affinity (p); + printf ("\n"); + } + #pragma omp barrier + if (omp_get_thread_num () == 2) + { + int pp = 0; + switch (places_array[test_places].count) + { + case 8: + case 7: + case 5: + case 3: + pp = 2; + break; + case 2: + pp = 1; + break; + } + /* Close, close/master. */ + #pragma omp parallel num_threads (4) firstprivate (pp) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#4,#1 thread 2,%d", thr); + if (test_spread_master_close) + /* Outer is close, inner is master. */ + p = places_array[test_places].places[pp]; + else if (omp_get_num_threads () == 4 && test_true) + /* Outer is close, inner is close. 
*/ + switch (places_array[test_places].count) + { + case 8: + /* T = 4, P = 8. */ + case 7: + /* T = 4, P = 7. */ + p = places_array[test_places].places[2 + thr]; + break; + case 5: + /* T = 4, P = 5. There is wrap-around for thr3. */ + p = places_array[test_places].places[thr == 3 ? 0 : 2 + thr]; + break; + case 3: + /* T = 4, P = 3, thr{0,3} go into p2, thr1 into p0, thr2 + into p1. */ + p = places_array[test_places].places[(2 + thr) % 3]; + break; + case 2: + /* T = 4, P = 2, 2 threads into each place. */ + p = places_array[test_places].places[1 - thr / 2]; + break; + } + + print_affinity (p); + printf ("\n"); + } + } + /* Close, spread. */ + #pragma omp parallel num_threads (4) proc_bind (spread) + { + verify (omp_proc_bind_spread, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#4,#2 thread 2,%d", thr); + if (omp_get_num_threads () == 4 + && (test_spread_master_close || test_true)) + /* Outer is close, inner is spread. */ + switch (places_array[test_places].count) + { + case 8: + /* T = 4, P = 8, each subpartition has 2 places. */ + case 7: + /* T = 4, P = 7, each subpartition has 2 places, but + last partition, which has just one place. */ + p = places_array[test_places].places[thr == 3 ? 0 + : 2 + 2 * thr]; + break; + case 5: + /* T = 4, P = 5, first subpartition has 2 places, the + rest just one. */ + p = places_array[test_places].places[thr == 3 ? 0 + : 2 + thr]; + break; + case 3: + /* T = 4, P = 3, unit sized subpartitions, third gets + thr0 and thr3, first thr1, second thr2. */ + p = places_array[test_places].places[thr == 0 ? 2 : thr - 1]; + break; + case 2: + /* T = 4, P = 2, unit sized subpartitions, each with + 2 threads. */ + p = places_array[test_places].places[1 - thr / 2]; + break; + } + print_affinity (p); + printf ("\n"); + } + #pragma omp barrier + if (omp_get_thread_num () == 0) + { + /* Close, spread, close. */ + #pragma omp parallel num_threads (5) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#4,#2,#1 thread 2,0,%d", thr); + if (omp_get_num_threads () == 5 + && (test_spread_master_close || test_true)) + /* Outer is close, inner spread, innermost close. */ + switch (places_array[test_places].count) + { + case 8: + case 7: + /* T = 5, P = 2. */ + p = places_array[test_places].places[2 + + (thr & 2) / 2]; + break; + /* All the rest are T = 5, P = 1. */ + case 5: + case 3: + p = places_array[test_places].places[2]; + break; + case 2: + p = places_array[test_places].places[1]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + #pragma omp barrier + if (omp_get_thread_num () == 2) + { + /* Close, spread, close. */ + #pragma omp parallel num_threads (5) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#4,#2,#2 thread 2,2,%d", thr); + if (omp_get_num_threads () == 5 + && (test_spread_master_close || test_true)) + /* Outer is close, inner spread, innermost close. */ + switch (places_array[test_places].count) + { + case 8: + /* T = 5, P = 2. */ + p = places_array[test_places].places[6 + + (thr & 2) / 2]; + break; + /* All the rest are T = 5, P = 1. 
*/ + case 7: + p = places_array[test_places].places[6]; + break; + case 5: + p = places_array[test_places].places[4]; + break; + case 3: + p = places_array[test_places].places[1]; + break; + case 2: + p = places_array[test_places].places[0]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + #pragma omp barrier + if (omp_get_thread_num () == 3) + { + /* Close, spread, close. */ + #pragma omp parallel num_threads (5) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#4,#2,#3 thread 2,3,%d", thr); + if (omp_get_num_threads () == 5 + && (test_spread_master_close || test_true)) + /* Outer is close, inner spread, innermost close. */ + switch (places_array[test_places].count) + { + case 8: + case 7: + case 5: + /* T = 5, P = 2. */ + p = places_array[test_places].places[(thr & 2) / 2]; + break; + /* All the rest are T = 5, P = 1. */ + case 3: + p = places_array[test_places].places[2]; + break; + case 2: + p = places_array[test_places].places[0]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + } + /* Close, master. */ + #pragma omp parallel num_threads (4) proc_bind(master) \ + firstprivate (pp) + { + verify (omp_proc_bind_master, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#4,#3 thread 2,%d", thr); + if (test_spread_master_close || test_true) + /* Outer is close, inner master. */ + p = places_array[test_places].places[pp]; + print_affinity (p); + printf ("\n"); + } + } + /* Close, close. */ + #pragma omp parallel num_threads (6) proc_bind (close) + { + verify (omp_proc_bind_close, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#4,#4 thread 2,%d", thr); + if (omp_get_num_threads () == 6 + && (test_spread_master_close || test_true)) + switch (places_array[test_places].count) + { + case 8: + /* T = 6, P = 8. */ + p = places_array[test_places].places[2 + thr]; + break; + case 7: + /* T = 6, P = 7. */ + p = places_array[test_places].places[thr == 5 ? 0 : 2 + thr]; + break; + case 5: + /* T = 6, P = 5. thr{0,5} go into the third place. */ + p = places_array[test_places].places[thr >= 3 ? thr - 3 + : 2 + thr]; + break; + case 3: + /* T = 6, P = 3, two threads into each place. */ + p = places_array[test_places].places[thr < 2 ? 2 + : thr / 2 - 1]; + break; + case 2: + /* T = 6, P = 2, 3 threads into each place. 
*/ + p = places_array[test_places].places[1 - thr / 3]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + } + } + + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/atomic-14.c b/libgomp/testsuite/libgomp.c/atomic-14.c index 5936650..9046d80 100644 --- a/libgomp/testsuite/libgomp.c/atomic-14.c +++ b/libgomp/testsuite/libgomp.c/atomic-14.c @@ -16,7 +16,7 @@ main () #pragma omp atomic update x = x + 7; #pragma omp atomic - x = x + 7 + 6; + x = x + (7 + 6); #pragma omp atomic update x = x + 2 * 3; #pragma omp atomic @@ -65,7 +65,7 @@ main () if (v != -8) abort (); #pragma omp atomic - x = x * -4 / 2; + x = x * (-4 / 2); #pragma omp atomic read v = x; if (v != 16) diff --git a/libgomp/testsuite/libgomp.c/atomic-15.c b/libgomp/testsuite/libgomp.c/atomic-15.c new file mode 100644 index 0000000..58331f4 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/atomic-15.c @@ -0,0 +1,99 @@ +// { dg-do run } + +extern void abort (void); +int x = 6; + +int +main () +{ + int v, l = 2, s = 1; + #pragma omp atomic + x = -3 + x; + #pragma omp atomic read + v = x; + if (v != 3) + abort (); + #pragma omp atomic update + x = 3 * 2 * 1 + x; + #pragma omp atomic read + v = x; + if (v != 9) + abort (); + #pragma omp atomic capture + v = x = x | 16; + if (v != 25) + abort (); + #pragma omp atomic capture + v = x = x + 14 * 2 / 4; + if (v != 32) + abort (); + #pragma omp atomic capture + v = x = 5 | x; + if (v != 37) + abort (); + #pragma omp atomic capture + v = x = 40 + 12 - 2 - 7 - x; + if (v != 6) + abort (); + #pragma omp atomic read + v = x; + if (v != 6) + abort (); + #pragma omp atomic capture + { v = x; x = 3 + x; } + if (v != 6) + abort (); + #pragma omp atomic capture + { v = x; x = -1 * -1 * -1 * -1 - x; } + if (v != 9) + abort (); + #pragma omp atomic read + v = x; + if (v != -8) + abort (); + #pragma omp atomic capture + { x = 2 * 2 - x; v = x; } + if (v != 12) + abort (); + #pragma omp atomic capture + { x = 7 & x; v = x; } + if (v != 4) + abort (); + #pragma omp atomic capture + { v = x; x = 6; } + if (v != 4) + abort (); + #pragma omp atomic read + v = x; + if (v != 6) + abort (); + #pragma omp atomic capture + { v = x; x = 7 * 8 + 23; } + if (v != 6) + abort (); + #pragma omp atomic read + v = x; + if (v != 79) + abort (); + #pragma omp atomic capture + { v = x; x = 23 + 6 * 4; } + if (v != 79) + abort (); + #pragma omp atomic read + v = x; + if (v != 47) + abort (); + #pragma omp atomic capture + { v = x; x = l ? 
17 : 12; } + if (v != 47) + abort (); + #pragma omp atomic capture + { v = x; x = l = s++ + 3; } + if (v != 17 || l != 4 || s != 2) + abort (); + #pragma omp atomic read + v = x; + if (v != 4) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/atomic-16.c b/libgomp/testsuite/libgomp.c/atomic-16.c new file mode 100644 index 0000000..d33f670 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/atomic-16.c @@ -0,0 +1,58 @@ +// { dg-do run } + +extern void abort (void); +int x = 6, cnt; + +int +foo (void) +{ + return cnt++; +} + +int +main () +{ + int v, *p; + p = &x; + #pragma omp atomic update + p[foo (), 0] = 16 + 6 - p[foo (), 0]; + #pragma omp atomic read + v = x; + if (cnt != 2 || v != 16) + abort (); + #pragma omp atomic capture + v = p[foo () + foo (), 0] = p[foo () + foo (), 0] + 3; + if (cnt != 6 || v != 19) + abort (); + #pragma omp atomic capture + v = p[foo (), 0] = 12 * 1 / 2 + (foo (), 0) + p[foo (), 0]; + if (cnt != 9 || v != 25) + abort (); + #pragma omp atomic capture + { + v = p[foo () & 0]; p[foo () & 0] = (foo (), 1) * 9 - p[foo () & 0]; + } + if (cnt != 13 || v != 25) + abort (); + #pragma omp atomic read + v = x; + if (v != -16) + abort (); + #pragma omp atomic capture + { + p[0 & foo ()] = 16 - 2 + 3 + p[0 & foo ()]; v = p[0 & foo ()]; + } + if (cnt != 16 || v != 1) + abort (); + #pragma omp atomic capture + { + v = p[foo (), 0]; p[foo (), 0] = (foo (), 7) ? 13 : foo () + 6; + } + if (cnt != 19 || v != 1) + abort (); + #pragma omp atomic read + v = x; + if (v != 13) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/atomic-17.c b/libgomp/testsuite/libgomp.c/atomic-17.c new file mode 100644 index 0000000..2bd0e9b --- /dev/null +++ b/libgomp/testsuite/libgomp.c/atomic-17.c @@ -0,0 +1,99 @@ +// { dg-do run } + +extern void abort (void); +int x = 6; + +int +main () +{ + int v, l = 2, s = 1; + #pragma omp atomic seq_cst + x = -3 + x; + #pragma omp atomic read seq_cst + v = x; + if (v != 3) + abort (); + #pragma omp atomic update seq_cst + x = 3 * 2 * 1 + x; + #pragma omp atomic read seq_cst + v = x; + if (v != 9) + abort (); + #pragma omp atomic capture seq_cst + v = x = x | 16; + if (v != 25) + abort (); + #pragma omp atomic capture seq_cst + v = x = x + 14 * 2 / 4; + if (v != 32) + abort (); + #pragma omp atomic capture seq_cst + v = x = 5 | x; + if (v != 37) + abort (); + #pragma omp atomic capture seq_cst + v = x = 40 + 12 - 2 - 7 - x; + if (v != 6) + abort (); + #pragma omp atomic read seq_cst + v = x; + if (v != 6) + abort (); + #pragma omp atomic capture seq_cst + { v = x; x = 3 + x; } + if (v != 6) + abort (); + #pragma omp atomic capture seq_cst + { v = x; x = -1 * -1 * -1 * -1 - x; } + if (v != 9) + abort (); + #pragma omp atomic read seq_cst + v = x; + if (v != -8) + abort (); + #pragma omp atomic capture seq_cst + { x = 2 * 2 - x; v = x; } + if (v != 12) + abort (); + #pragma omp atomic capture seq_cst + { x = 7 & x; v = x; } + if (v != 4) + abort (); + #pragma omp atomic capture seq_cst + { v = x; x = 6; } + if (v != 4) + abort (); + #pragma omp atomic read seq_cst + v = x; + if (v != 6) + abort (); + #pragma omp atomic capture seq_cst + { v = x; x = 7 * 8 + 23; } + if (v != 6) + abort (); + #pragma omp atomic read seq_cst + v = x; + if (v != 79) + abort (); + #pragma omp atomic capture seq_cst + { v = x; x = 23 + 6 * 4; } + if (v != 79) + abort (); + #pragma omp atomic read seq_cst + v = x; + if (v != 47) + abort (); + #pragma omp atomic capture seq_cst + { v = x; x = l ? 
17 : 12; } + if (v != 47) + abort (); + #pragma omp atomic capture seq_cst + { v = x; x = l = s++ + 3; } + if (v != 17 || l != 4 || s != 2) + abort (); + #pragma omp atomic read seq_cst + v = x; + if (v != 4) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/cancel-for-1.c b/libgomp/testsuite/libgomp.c/cancel-for-1.c new file mode 100644 index 0000000..f805f13 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/cancel-for-1.c @@ -0,0 +1,22 @@ +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_CANCELLATION "true" } */ + +#include <stdlib.h> +#include <omp.h> + +int +main () +{ + #pragma omp parallel num_threads (32) + { + int i; + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for + if (omp_get_cancellation ()) + abort (); + } + } + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/cancel-for-2.c b/libgomp/testsuite/libgomp.c/cancel-for-2.c new file mode 100644 index 0000000..30cfbb1 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/cancel-for-2.c @@ -0,0 +1,95 @@ +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_CANCELLATION "true" } */ + +#include <stdlib.h> +#include <omp.h> + +__attribute__((noinline, noclone)) int +foo (int *x) +{ + int v = 0, w = 0; + #pragma omp parallel num_threads (32) shared (v, w) + { + int i; + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for if (x[0]) + abort (); + } + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for if (x[1]) + #pragma omp atomic + v++; + } + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for if (x[2]) + #pragma omp atomic + w += 8; + } + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for if (x[3]) + #pragma omp atomic + v += 2; + } + } + if (v != 3000 || w != 0) + abort (); + #pragma omp parallel num_threads (32) shared (v, w) + { + int i; + /* None of these cancel directives should actually cancel anything, + but the compiler shouldn't know that and thus should use cancellable + barriers at the end of all the workshares. 
*/ + #pragma omp cancel parallel if (omp_get_thread_num () == 1 && x[4]) + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for if (x[0]) + abort (); + } + #pragma omp cancel parallel if (omp_get_thread_num () == 2 && x[4]) + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for if (x[1]) + #pragma omp atomic + v++; + } + #pragma omp cancel parallel if (omp_get_thread_num () == 3 && x[4]) + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for if (x[2]) + #pragma omp atomic + w += 8; + } + #pragma omp cancel parallel if (omp_get_thread_num () == 4 && x[4]) + #pragma omp for + for (i = 0; i < 1000; ++i) + { + #pragma omp cancel for if (x[3]) + #pragma omp atomic + v += 2; + } + #pragma omp cancel parallel if (omp_get_thread_num () == 5 && x[4]) + } + if (v != 6000 || w != 0) + abort (); + return 0; +} + +int +main () +{ + int x[] = { 1, 0, 1, 0, 0 }; + if (omp_get_cancellation ()) + foo (x); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/cancel-parallel-1.c b/libgomp/testsuite/libgomp.c/cancel-parallel-1.c new file mode 100644 index 0000000..614eb50 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/cancel-parallel-1.c @@ -0,0 +1,17 @@ +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_CANCELLATION "true" } */ + +#include <stdlib.h> +#include <omp.h> + +int +main () +{ + #pragma omp parallel num_threads (32) + { + #pragma omp cancel parallel + if (omp_get_cancellation ()) + abort (); + } + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/cancel-parallel-2.c b/libgomp/testsuite/libgomp.c/cancel-parallel-2.c new file mode 100644 index 0000000..cae0aa4 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/cancel-parallel-2.c @@ -0,0 +1,53 @@ +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_CANCELLATION "true" } */ + +#include <stdlib.h> +#include <unistd.h> +#include <omp.h> + +static void +foo (int *x) +{ + #pragma omp parallel firstprivate(x) num_threads (32) + { + int thr = omp_get_thread_num (); + switch (x[thr]) + { + case 4: + #pragma omp cancel parallel + break; + case 3: + #pragma omp task + usleep (1000); + #pragma omp task + usleep (2000); + #pragma omp task + usleep (4000); + break; + case 2: + usleep (1000); + /* FALLTHRU */ + case 1: + #pragma omp cancellation point parallel + break; + } + #pragma omp barrier + if (omp_get_cancellation ()) + abort (); + } +} + +int +main () +{ + int i, j, x[32] = { 0, 1, 2, 4, 2, 2, 1, 0 }; + foo (x); + for (i = 0; i < 32; i++) + { + for (j = 0; j < 32; j++) + x[j] = rand () & 3; + x[rand () & 31] = 4; + foo (x); + } + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/cancel-parallel-3.c b/libgomp/testsuite/libgomp.c/cancel-parallel-3.c new file mode 100644 index 0000000..7ceaed1 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/cancel-parallel-3.c @@ -0,0 +1,39 @@ +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_CANCELLATION "true" } */ + +#include <omp.h> +#include <unistd.h> + +static inline void +do_some_work (void) +{ + asm volatile ("" : : : "memory"); +} + +int +main () +{ + omp_set_dynamic (0); + omp_set_schedule (omp_sched_static, 1); + #pragma omp parallel num_threads (16) + { + int i, j; + do_some_work (); + #pragma omp barrier + if (omp_get_thread_num () == 1) + { + sleep (2); + #pragma omp cancellation point parallel + } + for (j = 3; j <= 16; j++) + #pragma omp for schedule (runtime) nowait + for (i = 0; i < j; i++) + do_some_work (); + if (omp_get_thread_num () == 0) + { + sleep (1); + #pragma omp cancel parallel + } + } + return 0; +} diff --git 
a/libgomp/testsuite/libgomp.c/cancel-sections-1.c b/libgomp/testsuite/libgomp.c/cancel-sections-1.c new file mode 100644 index 0000000..e0cef0b --- /dev/null +++ b/libgomp/testsuite/libgomp.c/cancel-sections-1.c @@ -0,0 +1,38 @@ +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_CANCELLATION "true" } */ + +#include <stdlib.h> +#include <omp.h> + +int +main () +{ + if (!omp_get_cancellation ()) + return 0; + #pragma omp parallel num_threads (32) + { + #pragma omp sections + { + { + #pragma omp cancel sections + abort (); + } + #pragma omp section + { + #pragma omp cancel sections + abort (); + } + #pragma omp section + { + #pragma omp cancel sections + abort (); + } + #pragma omp section + { + #pragma omp cancel sections + abort (); + } + } + } + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/cancel-taskgroup-1.c b/libgomp/testsuite/libgomp.c/cancel-taskgroup-1.c new file mode 100644 index 0000000..5a80811 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/cancel-taskgroup-1.c @@ -0,0 +1,70 @@ +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_CANCELLATION "true" } */ + +#include <stdlib.h> +#include <omp.h> + +struct T { struct T *children[2]; int val; }; + +struct T * +search (struct T *tree, int val, int lvl) +{ + if (tree == NULL || tree->val == val) + return tree; + struct T *ret = NULL; + int i; + for (i = 0; i < 2; i++) + #pragma omp task shared(ret) if(lvl < 10) + { + struct T *r = search (tree->children[i], val, lvl + 1); + if (r) + { + #pragma omp atomic write + ret = r; + #pragma omp cancel taskgroup + } + } + #pragma omp taskwait + return ret; +} + +struct T * +searchp (struct T *tree, int val) +{ + struct T *ret; + #pragma omp parallel shared(ret) firstprivate (tree, val) + #pragma omp single + #pragma omp taskgroup + ret = search (tree, val, 0); + return ret; +} + +int +main () +{ + /* Must be power of two minus 1. */ + int size = 0x7ffff; + struct T *trees = (struct T *) malloc (size * sizeof (struct T)); + if (trees == NULL) + return 0; + int i, l = 1, b = 0; + for (i = 0; i < size; i++) + { + if (i == l) + { + b = l; + l = l * 2 + 1; + } + trees[i].val = i; + trees[i].children[0] = l == size ? NULL : &trees[l + (i - b) * 2]; + trees[i].children[1] = l == size ? 
NULL : &trees[l + (i - b) * 2 + 1]; + } + for (i = 0; i < 50; i++) + { + int v = random () & size; + if (searchp (&trees[0], v) != &trees[v]) + abort (); + } + free (trees); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/cancel-taskgroup-2.c b/libgomp/testsuite/libgomp.c/cancel-taskgroup-2.c new file mode 100644 index 0000000..c7b8bf7 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/cancel-taskgroup-2.c @@ -0,0 +1,37 @@ +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_CANCELLATION "true" } */ + +#include <stdlib.h> +#include <unistd.h> +#include <omp.h> + +int +main () +{ + #pragma omp parallel + #pragma omp taskgroup + #pragma omp task + { + #pragma omp cancel taskgroup + if (omp_get_cancellation ()) + abort (); + } + #pragma omp parallel + { + #pragma omp barrier + #pragma omp single + #pragma omp taskgroup + { + int i; + for (i = 0; i < 50; i++) + #pragma omp task + { + #pragma omp cancellation point taskgroup + usleep (30); + #pragma omp cancel taskgroup if (i > 5) + } + } + usleep (10); + } + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/depend-1.c b/libgomp/testsuite/libgomp.c/depend-1.c new file mode 100644 index 0000000..2db1205 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/depend-1.c @@ -0,0 +1,215 @@ +#include <stdlib.h> + +void +dep (void) +{ + int x = 1; + #pragma omp parallel + #pragma omp single + { + #pragma omp task shared (x) depend(out: x) + x = 2; + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + } +} + +void +dep2 (void) +{ + #pragma omp parallel + #pragma omp single + { + int x = 1; + #pragma omp task shared (x) depend(out: x) + x = 2; + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + #pragma omp taskwait + } +} + +void +dep3 (void) +{ + #pragma omp parallel + { + int x = 1; + #pragma omp single + { + #pragma omp task shared (x) depend(out: x) + x = 2; + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + } + } +} + +void +firstpriv (void) +{ + #pragma omp parallel + #pragma omp single + { + int x = 1; + #pragma omp task depend(out: x) + x = 2; + #pragma omp task depend(in: x) + if (x != 1) + abort (); + } +} + +void +antidep (void) +{ + int x = 1; + #pragma omp parallel + #pragma omp single + { + #pragma omp task shared(x) depend(in: x) + if (x != 1) + abort (); + #pragma omp task shared(x) depend(out: x) + x = 2; + } +} + +void +antidep2 (void) +{ + #pragma omp parallel + #pragma omp single + { + int x = 1; + #pragma omp taskgroup + { + #pragma omp task shared(x) depend(in: x) + if (x != 1) + abort (); + #pragma omp task shared(x) depend(out: x) + x = 2; + } + } +} + +void +antidep3 (void) +{ + #pragma omp parallel + { + int x = 1; + #pragma omp single + { + #pragma omp task shared(x) depend(in: x) + if (x != 1) + abort (); + #pragma omp task shared(x) depend(out: x) + x = 2; + } + } +} + + +void +outdep (void) +{ + #pragma omp parallel + #pragma omp single + { + int x = 0; + #pragma omp task shared(x) depend(out: x) + x = 1; + #pragma omp task shared(x) depend(out: x) + x = 2; + #pragma omp taskwait + if (x != 2) + abort (); + } +} + +void +concurrent (void) +{ + int x = 1; + #pragma omp parallel + #pragma omp single + { + #pragma omp task shared (x) depend(out: x) + x = 2; + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + } +} + +void +concurrent2 (void) +{ + #pragma omp parallel + #pragma omp single + { + int x = 1; + 
#pragma omp task shared (x) depend(out: x) + x = 2; + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + #pragma omp taskwait + } +} + +void +concurrent3 (void) +{ + #pragma omp parallel + { + int x = 1; + #pragma omp single + { + #pragma omp task shared (x) depend(out: x) + x = 2; + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + #pragma omp task shared (x) depend(in: x) + if (x != 2) + abort (); + } + } +} + +int +main () +{ + dep (); + dep2 (); + dep3 (); + firstpriv (); + antidep (); + antidep2 (); + antidep3 (); + outdep (); + concurrent (); + concurrent2 (); + concurrent3 (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/depend-2.c b/libgomp/testsuite/libgomp.c/depend-2.c new file mode 100644 index 0000000..2772309 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/depend-2.c @@ -0,0 +1,71 @@ +#include <stdlib.h> +#include <unistd.h> + +void +foo (int do_sleep) +{ + int a[64], i, *p = a + 4, x = 0; + asm volatile ("" : "+r" (p)); + for (i = 0; i < 64; i++) + a[i] = i + 8; + #pragma omp parallel private (i) + { + #pragma omp single nowait + { + for (i = 0; i < 8; i++) + { + #pragma omp task depend(out: a[i * 8 : 4]) + a[i * 8] += (i + 2) * 9; + #pragma omp task depend(out: p[i * 8 : 2]) + p[i * 8] += (i + 3) * 10; + #pragma omp task depend(out: x) + x = 1; + } + for (i = 0; i < 8; i++) + #pragma omp task depend(in: a[i * 8 : 4]) \ + depend(inout: a[i * 8 + 4 : 2]) \ + depend(in: a[0 : 4]) depend(in: x) + { + if (a[0] != 8 + 2 * 9 || x != 1) + abort (); + if (a[i * 8] != i * 8 + 8 + (i + 2) * 9) + abort (); + if (a[4 + i * 8] != 4 + i * 8 + 8 + (i + 3) * 10) + abort (); + p[i * 8] += a[i * 8]; + } + for (i = 0; i < 8; i++) + #pragma omp task depend(inout: a[i * 8 : 4]) \ + depend(in: p[i * 8 : 2]) \ + depend(in: p[0 : 2], x) + { + if (p[0] != 4 + 8 + 3 * 10 + 0 + 8 + 2 * 9 || x != 1) + abort (); + if (a[i * 8] != i * 8 + 8 + (i + 2) * 9) + abort (); + if (a[4 + i * 8] != (4 + i * 8 + 8 + (i + 3) * 10 + + i * 8 + 8 + (i + 2) * 9)) + abort (); + a[i * 8] += 2; + } + for (i = 0; i < 4; i++) + #pragma omp task depend(in: a[i * 16 : 4], a[i * 16 + 8 : 4], x) + { + if (a[i * 16] != i * 16 + 8 + (2 * i + 2) * 9 + 2 || x != 1) + abort (); + if (p[i * 16 + 4] != i * 16 + 8 + 8 + (2 * i + 1 + 2) * 9 + 2) + abort (); + } + } + if (do_sleep) + sleep (1); + } +} + +int +main () +{ + foo (1); + foo (0); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/depend-3.c b/libgomp/testsuite/libgomp.c/depend-3.c new file mode 100644 index 0000000..d565d6e --- /dev/null +++ b/libgomp/testsuite/libgomp.c/depend-3.c @@ -0,0 +1,51 @@ +#include <stdlib.h> +#include <unistd.h> + +int +main () +{ + #pragma omp parallel + #pragma omp single + { + int x = 1, y = 2; + #pragma omp taskgroup + { + #pragma omp task shared (x) depend(in: x) + { + usleep (10000); + if (x != 1) + abort (); + } + #pragma omp taskgroup + { + #pragma omp task shared (x) depend(in: x) + { + usleep (15000); + if (x != 1) + abort (); + } + #pragma omp task shared (y) depend(inout: y) + { + if (y != 2) + abort (); + y = 3; + } + #pragma omp taskgroup + { + #pragma omp task shared (x) depend(in: x) + { + usleep (13000); + if (x != 1) + abort (); + } + #pragma omp taskgroup + { + #pragma omp task shared (x) depend(out: x) + x = 2; + } + } + } + } + } + return 0; +} diff --git 
a/libgomp/testsuite/libgomp.c/depend-4.c b/libgomp/testsuite/libgomp.c/depend-4.c new file mode 100644 index 0000000..a4395ea --- /dev/null +++ b/libgomp/testsuite/libgomp.c/depend-4.c @@ -0,0 +1,56 @@ +#include <stdlib.h> +#include <unistd.h> + +int +main () +{ + #pragma omp parallel + #pragma omp single + { + int x = 1, y = 2, z = 3; + #pragma omp taskgroup + { + #pragma omp task shared (x, y, z) depend(inout: x, y) \ + depend (in: z) if (x > 10) + { + if (x != 1 || y != 2 || z != 3) + abort (); + x = 4; + y = 5; + } + /* The above task has depend clauses, but no dependencies + on earlier tasks, and is if (0), so must be scheduled + immediately. */ + if (x != 4 || y != 5) + abort (); + } + #pragma omp taskgroup + { + #pragma omp task shared (x, y) depend(in: x, y) + { + usleep (10000); + if (x != 4 || y != 5 || z != 3) + abort (); + } + #pragma omp task shared (x, y) depend(in: x, y) + { + usleep (10000); + if (x != 4 || y != 5 || z != 3) + abort (); + } + #pragma omp task shared (x, y, z) depend(inout: x, y) \ + depend (in: z) if (x > 10) + { + if (x != 4 || y != 5 || z != 3) + abort (); + x = 6; + y = 7; + } + /* The above task has depend clauses, and may have dependencies + on earlier tasks, while it is if (0), it can be deferred. */ + } + if (x != 6 || y != 7) + abort (); + } + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/for-1.c b/libgomp/testsuite/libgomp.c/for-1.c new file mode 100644 index 0000000..e702453 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/for-1.c @@ -0,0 +1,35 @@ +/* { dg-options "-std=gnu99 -fopenmp" } */ + +extern void abort (void); + +#define M(x, y, z) O(x, y, z) +#define O(x, y, z) x ## _ ## y ## _ ## z + +#define F parallel for +#define G pf +#include "for-1.h" +#undef F +#undef G + +#define F for +#define G f +#include "for-1.h" +#undef F +#undef G + +int +main () +{ + if (test_pf_static () + || test_pf_static32 () + || test_pf_auto () + || test_pf_guided32 () + || test_pf_runtime () + || test_f_static () + || test_f_static32 () + || test_f_auto () + || test_f_guided32 () + || test_f_runtime ()) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/for-1.h b/libgomp/testsuite/libgomp.c/for-1.h new file mode 100644 index 0000000..fa82c5b --- /dev/null +++ b/libgomp/testsuite/libgomp.c/for-1.h @@ -0,0 +1,25 @@ +#define S +#define N(x) M(x, G, static) +#include "for-2.h" +#undef S +#undef N +#define S schedule(static, 32) +#define N(x) M(x, G, static32) +#include "for-2.h" +#undef S +#undef N +#define S schedule(auto) +#define N(x) M(x, G, auto) +#include "for-2.h" +#undef S +#undef N +#define S schedule(guided, 32) +#define N(x) M(x, G, guided32) +#include "for-2.h" +#undef S +#undef N +#define S schedule(runtime) +#define N(x) M(x, G, runtime) +#include "for-2.h" +#undef S +#undef N diff --git a/libgomp/testsuite/libgomp.c/for-2.c b/libgomp/testsuite/libgomp.c/for-2.c new file mode 100644 index 0000000..f5a01ab --- /dev/null +++ b/libgomp/testsuite/libgomp.c/for-2.c @@ -0,0 +1,46 @@ +/* { dg-options "-std=gnu99 -fopenmp" } */ + +extern void abort (void); + +#define M(x, y, z) O(x, y, z) +#define O(x, y, z) x ## _ ## y ## _ ## z + +#define F simd +#define G simd +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F parallel for simd +#define G pf_simd +#include "for-1.h" +#undef F +#undef G + +#define F for simd +#define G f_simd +#include "for-1.h" +#undef F +#undef G + +int +main () +{ + if (test_simd_normal () + || test_pf_simd_static () + || test_pf_simd_static32 () + || 
test_pf_simd_auto () + || test_pf_simd_guided32 () + || test_pf_simd_runtime () + || test_f_simd_static () + || test_f_simd_static32 () + || test_f_simd_auto () + || test_f_simd_guided32 () + || test_f_simd_runtime ()) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/for-2.h b/libgomp/testsuite/libgomp.c/for-2.h new file mode 100644 index 0000000..57c385e --- /dev/null +++ b/libgomp/testsuite/libgomp.c/for-2.h @@ -0,0 +1,269 @@ +#ifndef VARS +#define VARS +int a[1500]; +float b[10][15][10]; +__attribute__((noreturn)) void +noreturn (void) +{ + for (;;); +} +#endif + +__attribute__((noinline, noclone)) void +N(f0) (void) +{ + int i; +#pragma omp F S + for (i = 0; i < 1500; i++) + a[i] += 2; +} + +__attribute__((noinline, noclone)) void +N(f1) (void) +{ +#pragma omp F S + for (unsigned int i = __INT_MAX__; i < 3000U + __INT_MAX__; i += 2) + a[(i - __INT_MAX__) >> 1] -= 2; +} + +__attribute__((noinline, noclone)) void +N(f2) (void) +{ + unsigned long long i; +#pragma omp F S + for (i = __LONG_LONG_MAX__ + 4500ULL - 27; + i > __LONG_LONG_MAX__ - 27ULL; i -= 3) + a[(i + 26LL - __LONG_LONG_MAX__) / 3] -= 4; +} + +__attribute__((noinline, noclone)) void +N(f3) (long long n1, long long n2, long long s3) +{ +#pragma omp F S + for (long long i = n1 + 23; i > n2 - 25; i -= s3) + a[i + 48] += 7; +} + +__attribute__((noinline, noclone)) void +N(f4) (void) +{ + unsigned int i; +#pragma omp F S + for (i = 30; i < 20; i += 2) + a[i] += 10; +} + +__attribute__((noinline, noclone)) void +N(f5) (int n11, int n12, int n21, int n22, int n31, int n32, + int s1, int s2, int s3) +{ + int v1, v2, v3; +#pragma omp F S collapse(3) + for (v1 = n11; v1 < n12; v1 += s1) + for (v2 = n21; v2 < n22; v2 += s2) + for (v3 = n31; v3 < n32; v3 += s3) + b[v1][v2][v3] += 2.5; +} + +__attribute__((noinline, noclone)) void +N(f6) (int n11, int n12, int n21, int n22, long long n31, long long n32, + int s1, int s2, long long int s3) +{ + int v1, v2; + long long v3; +#pragma omp F S collapse(3) + for (v1 = n11; v1 > n12; v1 += s1) + for (v2 = n21; v2 > n22; v2 += s2) + for (v3 = n31; v3 > n32; v3 += s3) + b[v1][v2 / 2][v3] -= 4.5; +} + +__attribute__((noinline, noclone)) void +N(f7) (void) +{ + unsigned int v1, v3; + unsigned long long v2; +#pragma omp F S collapse(3) + for (v1 = 0; v1 < 20; v1 += 2) + for (v2 = __LONG_LONG_MAX__ + 16ULL; + v2 > __LONG_LONG_MAX__ - 29ULL; v2 -= 3) + for (v3 = 10; v3 > 0; v3--) + b[v1 >> 1][(v2 - __LONG_LONG_MAX__ + 64) / 3 - 12][v3 - 1] += 5.5; +} + +__attribute__((noinline, noclone)) void +N(f8) (void) +{ + long long v1, v2, v3; +#pragma omp F S collapse(3) + for (v1 = 0; v1 < 20; v1 += 2) + for (v2 = 30; v2 < 20; v2++) + for (v3 = 10; v3 < 0; v3--) + b[v1][v2][v3] += 5.5; +} + +__attribute__((noinline, noclone)) void +N(f9) (void) +{ + int i; +#pragma omp F S + for (i = 20; i < 10; i++) + { + a[i] += 2; + noreturn (); + a[i] -= 4; + } +} + +__attribute__((noinline, noclone)) void +N(f10) (void) +{ + int i; +#pragma omp F S collapse(3) + for (i = 0; i < 10; i++) + for (int j = 10; j < 8; j++) + for (long k = -10; k < 10; k++) + { + b[i][j][k] += 4; + noreturn (); + b[i][j][k] -= 8; + } +} + +__attribute__((noinline, noclone)) void +N(f11) (int n) +{ + int i; +#pragma omp F S + for (i = 20; i < n; i++) + { + a[i] += 8; + noreturn (); + a[i] -= 16; + } +} + +__attribute__((noinline, noclone)) void +N(f12) (int n) +{ + int i; +#pragma omp F S collapse(3) + for (i = 0; i < 10; i++) + for (int j = n; j < 8; j++) + for (long k = -10; k < 10; k++) + { + b[i][j][k] += 16; + noreturn (); + 
b[i][j][k] -= 32; + } +} + +__attribute__((noinline, noclone)) void +N(f13) (void) +{ + int *i; +#pragma omp F S + for (i = a; i < &a[1500]; i++) + i[0] += 2; +} + +__attribute__((noinline, noclone)) void +N(f14) (void) +{ + float *i; +#pragma omp F S collapse(3) + for (i = &b[0][0][0]; i < &b[0][0][10]; i++) + for (float *j = &b[0][15][0]; j > &b[0][0][0]; j -= 10) + for (float *k = &b[0][0][10]; k > &b[0][0][0]; --k) + b[i - &b[0][0][0]][(j - &b[0][0][0]) / 10 - 1][(k - &b[0][0][0]) - 1] + -= 3.5; +} + +__attribute__((noinline, noclone)) int +N(test) (void) +{ + int i, j, k; + for (i = 0; i < 1500; i++) + a[i] = i - 25; + N(f0) (); + for (i = 0; i < 1500; i++) + if (a[i] != i - 23) + return 1; + N(f1) (); + for (i = 0; i < 1500; i++) + if (a[i] != i - 25) + return 1; + N(f2) (); + for (i = 0; i < 1500; i++) + if (a[i] != i - 29) + return 1; + N(f3) (1500LL - 1 - 23 - 48, -1LL + 25 - 48, 1LL); + for (i = 0; i < 1500; i++) + if (a[i] != i - 22) + return 1; + N(f3) (1500LL - 1 - 23 - 48, 1500LL - 1, 7LL); + for (i = 0; i < 1500; i++) + if (a[i] != i - 22) + return 1; + N(f4) (); + for (i = 0; i < 1500; i++) + if (a[i] != i - 22) + return 1; + for (i = 0; i < 10; i++) + for (j = 0; j < 15; j++) + for (k = 0; k < 10; k++) + b[i][j][k] = i - 2.5 + 1.5 * j - 1.5 * k; + N(f5) (0, 10, 0, 15, 0, 10, 1, 1, 1); + for (i = 0; i < 10; i++) + for (j = 0; j < 15; j++) + for (k = 0; k < 10; k++) + if (b[i][j][k] != i + 1.5 * j - 1.5 * k) + return 1; + N(f5) (0, 10, 30, 15, 0, 10, 4, 5, 6); + for (i = 0; i < 10; i++) + for (j = 0; j < 15; j++) + for (k = 0; k < 10; k++) + if (b[i][j][k] != i + 1.5 * j - 1.5 * k) + return 1; + N(f6) (9, -1, 29, 0, 9, -1, -1, -2, -1); + for (i = 0; i < 10; i++) + for (j = 0; j < 15; j++) + for (k = 0; k < 10; k++) + if (b[i][j][k] != i - 4.5 + 1.5 * j - 1.5 * k) + return 1; + N(f7) (); + for (i = 0; i < 10; i++) + for (j = 0; j < 15; j++) + for (k = 0; k < 10; k++) + if (b[i][j][k] != i + 1.0 + 1.5 * j - 1.5 * k) + return 1; + N(f8) (); + for (i = 0; i < 10; i++) + for (j = 0; j < 15; j++) + for (k = 0; k < 10; k++) + if (b[i][j][k] != i + 1.0 + 1.5 * j - 1.5 * k) + return 1; + N(f9) (); + N(f10) (); + N(f11) (10); + N(f12) (12); + for (i = 0; i < 1500; i++) + if (a[i] != i - 22) + return 1; + for (i = 0; i < 10; i++) + for (j = 0; j < 15; j++) + for (k = 0; k < 10; k++) + if (b[i][j][k] != i + 1.0 + 1.5 * j - 1.5 * k) + return 1; + N(f13) (); + N(f14) (); + for (i = 0; i < 1500; i++) + if (a[i] != i - 20) + return 1; + for (i = 0; i < 10; i++) + for (j = 0; j < 15; j++) + for (k = 0; k < 10; k++) + if (b[i][j][k] != i - 2.5 + 1.5 * j - 1.5 * k) + return 1; + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/for-3.c b/libgomp/testsuite/libgomp.c/for-3.c new file mode 100644 index 0000000..06cbf4f --- /dev/null +++ b/libgomp/testsuite/libgomp.c/for-3.c @@ -0,0 +1,110 @@ +/* { dg-options "-std=gnu99 -fopenmp" } */ + +extern void abort (); + +#define M(x, y, z) O(x, y, z) +#define O(x, y, z) x ## _ ## y ## _ ## z + +#pragma omp declare target + +#define F distribute +#define G d +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F distribute +#define G d_ds128 +#define S dist_schedule(static, 128) +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F distribute simd +#define G ds +#define S +#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F distribute simd +#define G ds_ds128 +#define S dist_schedule(static, 128) 
+#define N(x) M(x, G, normal) +#include "for-2.h" +#undef S +#undef N +#undef F +#undef G + +#define F distribute parallel for +#define G dpf +#include "for-1.h" +#undef F +#undef G + +#define F distribute parallel for dist_schedule(static, 128) +#define G dpf_ds128 +#include "for-1.h" +#undef F +#undef G + +#define F distribute parallel for simd +#define G dpfs +#include "for-1.h" +#undef F +#undef G + +#define F distribute parallel for simd dist_schedule(static, 128) +#define G dpfs_ds128 +#include "for-1.h" +#undef F +#undef G + +#pragma omp end declare target + +int +main () +{ + int err = 0; + #pragma omp target teams reduction(|:err) + { + err |= test_d_normal (); + err |= test_d_ds128_normal (); + err |= test_ds_normal (); + err |= test_ds_ds128_normal (); + err |= test_dpf_static (); + err |= test_dpf_static32 (); + err |= test_dpf_auto (); + err |= test_dpf_guided32 (); + err |= test_dpf_runtime (); + err |= test_dpf_ds128_static (); + err |= test_dpf_ds128_static32 (); + err |= test_dpf_ds128_auto (); + err |= test_dpf_ds128_guided32 (); + err |= test_dpf_ds128_runtime (); + err |= test_dpfs_static (); + err |= test_dpfs_static32 (); + err |= test_dpfs_auto (); + err |= test_dpfs_guided32 (); + err |= test_dpfs_runtime (); + err |= test_dpfs_ds128_static (); + err |= test_dpfs_ds128_static32 (); + err |= test_dpfs_ds128_auto (); + err |= test_dpfs_ds128_guided32 (); + err |= test_dpfs_ds128_runtime (); + } + if (err) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/pr58392.c b/libgomp/testsuite/libgomp.c/pr58392.c new file mode 100644 index 0000000..6ca97ad --- /dev/null +++ b/libgomp/testsuite/libgomp.c/pr58392.c @@ -0,0 +1,58 @@ +/* PR tree-optimization/58392 */ +/* { dg-do run } */ +/* { dg-options "-O2" } */ +/* { dg-additional-options "-msse2" { target sse2_runtime } } */ +/* { dg-additional-options "-mavx" { target avx_runtime } } */ + +extern void abort (void); +int d[32 * 32]; + +__attribute__((noinline, noclone)) int +foo (int a, int b) +{ + int j, c = 0; + #pragma omp parallel for reduction(+: c) + for (j = 0; j < a; j += 32) + { + int l; + #pragma omp simd reduction(+: c) + for (l = 0; l < b; ++l) + c += d[j + l]; + } + return c; +} + +__attribute__((noinline, noclone)) int +bar (int a) +{ + int j, c = 0; + #pragma omp parallel for simd reduction(+: c) + for (j = 0; j < a; ++j) + c += d[j]; + return c; +} + +__attribute__((noinline)) static int +baz (int a) +{ + int j, c = 0; + #pragma omp simd reduction(+: c) + for (j = 0; j < a; ++j) + c += d[j]; + return c; +} + +int +main () +{ + int i; + for (i = 0; i < 32 * 32; i++) + d[i] = (i & 31); + if (foo (32 * 32, 32) != (31 * 32 / 2) * 32) + abort (); + if (bar (32 * 32) != (31 * 32 / 2) * 32) + abort (); + if (baz (32 * 32) != (31 * 32 / 2) * 32) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/simd-1.c b/libgomp/testsuite/libgomp.c/simd-1.c new file mode 100644 index 0000000..352b3b7 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/simd-1.c @@ -0,0 +1,57 @@ +/* { dg-do run } */ +/* { dg-options "-O2" } */ +/* { dg-additional-options "-msse2" { target sse2_runtime } } */ +/* { dg-additional-options "-mavx" { target avx_runtime } } */ + +extern void abort (); +int a[1024] __attribute__((aligned (32))) = { 1 }; +int b[1024] __attribute__((aligned (32))) = { 1 }; +int k, m; +struct U { int u; }; +struct V { int v; }; + +__attribute__((noinline, noclone)) int +foo (int *p) +{ + int i, s = 0; + struct U u; + struct V v; + #pragma omp simd aligned(a, p : 32) linear(k: m + 1) \ + 
reduction(+:s) lastprivate(u, v) + for (i = 0; i < 1024; i++) + { + a[i] *= p[i]; + u.u = p[i] + k; + k += m + 1; + v.v = p[i] + k; + s += p[i] + k; + } + if (u.u != 36 + 4 + 3 * 1023 || v.v != 36 + 4 + 3 * 1024) + abort (); + return s; +} + +int +main () +{ +#if __SIZEOF_INT__ >= 4 + int i; + k = 4; + m = 2; + for (i = 0; i < 1024; i++) + { + a[i] = i - 512; + b[i] = (i - 51) % 39; + } + int s = foo (b); + for (i = 0; i < 1024; i++) + { + if (b[i] != (i - 51) % 39 + || a[i] != (i - 512) * b[i]) + abort (); + } + if (k != 4 + 3 * 1024 || s != 1596127) + abort (); +#endif + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/simd-2.c b/libgomp/testsuite/libgomp.c/simd-2.c new file mode 100644 index 0000000..b485fcb --- /dev/null +++ b/libgomp/testsuite/libgomp.c/simd-2.c @@ -0,0 +1,36 @@ +/* { dg-do run } */ +/* { dg-options "-O2" } */ +/* { dg-additional-options "-msse2" { target sse2_runtime } } */ +/* { dg-additional-options "-mavx" { target avx_runtime } } */ + +extern void abort (); +__UINTPTR_TYPE__ arr[1027]; + +__attribute__((noinline, noclone)) void +foo () +{ + int i, v; + #pragma omp simd private (v) safelen(16) + for (i = 0; i < 1027; i++) + arr[i] = (__UINTPTR_TYPE__) &v; +} + +int +main () +{ + int i, j, cnt = 0; + __UINTPTR_TYPE__ arr2[16]; + foo (); + for (i = 0; i < 1027; i++) + { + for (j = 0; j < cnt; j++) + if (arr[i] == arr2[j]) + break; + if (j != cnt) + continue; + if (cnt == 16) + abort (); + arr2[cnt++] = arr[i]; + } + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/simd-3.c b/libgomp/testsuite/libgomp.c/simd-3.c new file mode 100644 index 0000000..34a3883 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/simd-3.c @@ -0,0 +1,131 @@ +/* { dg-do run } */ +/* { dg-options "-O2" } */ +/* { dg-additional-options "-msse2" { target sse2_runtime } } */ +/* { dg-additional-options "-mavx" { target avx_runtime } } */ + +extern void abort (); +int a[1024] __attribute__((aligned (32))) = { 1 }; +int b[1024] __attribute__((aligned (32))) = { 1 }; +unsigned char c[1024] __attribute__((aligned (32))) = { 1 }; +int k, m; +__UINTPTR_TYPE__ u, u2, u3; + +__attribute__((noinline, noclone)) int +foo (int *p) +{ + int i, s = 0, s2 = 0, t, t2; + #pragma omp simd aligned(a, b, p : 32) linear(k: m + 1) reduction(+:s) \ + lastprivate (t2) + for (i = 0; i < 512; i++) + { + a[i] *= p[i]; + t2 = k + p[i]; + k += m + 1; + s += p[i] + k; + c[i]++; + } + #pragma omp simd aligned(a, b, p : 32) linear(k: m + 1) reduction(+:s2) \ + lastprivate (t, u, u2, u3) + for (i = 512; i < 1024; i++) + { + a[i] *= p[i]; + k += m + 1; + t = k + p[i]; + u = (__UINTPTR_TYPE__) &k; + u2 = (__UINTPTR_TYPE__) &s2; + u3 = (__UINTPTR_TYPE__) &t; + s2 += t; + c[i]++; + } + return s + s2 + t + t2; +} + +__attribute__((noinline, noclone)) long int +bar (int *p, long int n, long int o) +{ + long int i, s = 0, s2 = 0, t, t2; + #pragma omp simd aligned(a, b, p : 32) linear(k: m + 1) reduction(+:s) \ + lastprivate (t2) + for (i = 0; i < n; i++) + { + a[i] *= p[i]; + t2 = k + p[i]; + k += m + 1; + s += p[i] + k; + c[i]++; + } + #pragma omp simd aligned(a, b, p : 32) linear(k: m + 1) reduction(+:s2) \ + lastprivate (t, u, u2, u3) + for (i = n; i < o; i++) + { + a[i] *= p[i]; + k += m + 1; + t = k + p[i]; + u = (__UINTPTR_TYPE__) &k; + u2 = (__UINTPTR_TYPE__) &s2; + u3 = (__UINTPTR_TYPE__) &t; + s2 += t; + c[i]++; + } + return s + s2 + t + t2; +} + +int +main () +{ +#if __SIZEOF_INT__ >= 4 + int i; + k = 4; + m = 2; + for (i = 0; i < 1024; i++) + { + a[i] = i - 512; + b[i] = (i - 51) % 39; + c[i] = (unsigned char) i; + } + 
int s = foo (b); + for (i = 0; i < 1024; i++) + { + if (b[i] != (i - 51) % 39 + || a[i] != (i - 512) * b[i] + || c[i] != (unsigned char) (i + 1)) + abort (); + a[i] = i - 512; + } + if (k != 4 + 3 * 1024 + || s != 1596127 + (4 + 3 * 511 + b[511]) + (4 + 3 * 1024 + b[1023])) + abort (); + k = 4; + s = bar (b, 512, 1024); + for (i = 0; i < 1024; i++) + { + if (b[i] != (i - 51) % 39 + || a[i] != (i - 512) * b[i] + || c[i] != (unsigned char) (i + 2)) + abort (); + a[i] = i - 512; + } + if (k != 4 + 3 * 1024 + || s != 1596127 + (4 + 3 * 511 + b[511]) + (4 + 3 * 1024 + b[1023])) + abort (); + k = 4; + s = bar (b, 511, 1021); + for (i = 0; i < 1021; i++) + { + if (b[i] != (i - 51) % 39 + || a[i] != (i - 512) * b[i] + || c[i] != (unsigned char) (i + 3)) + abort (); + a[i] = i - 512; + } + for (i = 1021; i < 1024; i++) + if (b[i] != (i - 51) % 39 + || a[i] != i - 512 + || c[i] != (unsigned char) (i + 2)) + abort (); + if (k != 4 + 3 * 1021 + || s != 1586803 + (4 + 3 * 510 + b[510]) + (4 + 3 * 1021 + b[1020])) + abort (); +#endif + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/simd-4.c b/libgomp/testsuite/libgomp.c/simd-4.c new file mode 100644 index 0000000..fd87c7e --- /dev/null +++ b/libgomp/testsuite/libgomp.c/simd-4.c @@ -0,0 +1,42 @@ +/* { dg-do run } */ +/* { dg-options "-O2" } */ +/* { dg-additional-options "-msse2" { target sse2_runtime } } */ +/* { dg-additional-options "-mavx" { target avx_runtime } } */ + +extern void abort (); +int a[1024] __attribute__((aligned (32))) = { 1 }; +struct S { int s; }; +#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:int:omp_out += omp_in) + +__attribute__((noinline, noclone)) int +foo (void) +{ + int i, u = 0; + struct S s, t; + s.s = 0; t.s = 0; + #pragma omp simd aligned(a : 32) reduction(+:s) reduction(foo:t, u) + for (i = 0; i < 1024; i++) + { + int x = a[i]; + s.s += x; + t.s += x; + u += x; + } + if (t.s != s.s || u != s.s) + abort (); + return s.s; +} + +int +main () +{ + int i; + for (i = 0; i < 1024; i++) + a[i] = (i & 31) + (i / 128); + int s = foo (); + if (s != 19456) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/simd-5.c b/libgomp/testsuite/libgomp.c/simd-5.c new file mode 100644 index 0000000..0b6d41e --- /dev/null +++ b/libgomp/testsuite/libgomp.c/simd-5.c @@ -0,0 +1,44 @@ +/* { dg-do run } */ +/* { dg-options "-O2" } */ +/* { dg-additional-options "-msse2" { target sse2_runtime } } */ +/* { dg-additional-options "-mavx" { target avx_runtime } } */ + +extern void abort (); +int a[1024] __attribute__((aligned (32))) = { 1 }; +struct S { int s; }; +#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:int:omp_out += omp_in) + +__attribute__((noinline, noclone)) int +foo (void) +{ + int i, u = 0, q = 0; + struct S s, t; + s.s = 0; t.s = 0; + #pragma omp simd aligned(a : 32) reduction(+:s, q) reduction(foo:t, u) \ + safelen(1) + for (i = 0; i < 1024; i++) + { + int x = a[i]; + s.s += x; + t.s += x; + u += x; + q++; + } + if (t.s != s.s || u != s.s || q != 1024) + abort (); + return s.s; +} + +int +main () +{ + int i; + for (i = 0; i < 1024; i++) + a[i] = (i & 31) + (i / 128); + int s = foo (); + if (s != 19456) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/simd-6.c b/libgomp/testsuite/libgomp.c/simd-6.c new file mode 100644 index 0000000..896f347 --- /dev/null 
+++ b/libgomp/testsuite/libgomp.c/simd-6.c @@ -0,0 +1,44 @@ +/* PR libgomp/58482 */ +/* { dg-do run } */ +/* { dg-options "-O2" } */ +/* { dg-additional-options "-msse2" { target sse2_runtime } } */ +/* { dg-additional-options "-mavx" { target avx_runtime } } */ + +extern void abort (); +int a[1024] __attribute__((aligned (32))) = { 1 }; +struct S { int s; }; +#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:int:omp_out += omp_in) + +__attribute__((noinline, noclone)) int +foo (void) +{ + int i, u = 0; + struct S s, t; + s.s = 0; t.s = 0; + #pragma omp parallel for simd aligned(a : 32) reduction(+:s) \ + reduction(foo:t, u) + for (i = 0; i < 1024; i++) + { + int x = a[i]; + s.s += x; + t.s += x; + u += x; + } + if (t.s != s.s || u != s.s) + abort (); + return s.s; +} + +int +main () +{ + int i; + for (i = 0; i < 1024; i++) + a[i] = (i & 31) + (i / 128); + int s = foo (); + if (s != 19456) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/target-1.c b/libgomp/testsuite/libgomp.c/target-1.c new file mode 100644 index 0000000..f734d3c2 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/target-1.c @@ -0,0 +1,90 @@ +extern +#ifdef __cplusplus +"C" +#endif +void abort (void); + +void +fn1 (double *x, double *y, int z) +{ + int i; + for (i = 0; i < z; i++) + { + x[i] = i & 31; + y[i] = (i & 63) - 30; + } +} + +#pragma omp declare target +int tgtv = 6; +int +tgt (void) +{ + #pragma omp atomic update + tgtv++; + return 0; +} +#pragma omp end declare target + +double +fn2 (int x, int y, int z) +{ + double b[1024], c[1024], s = 0; + int i, j; + fn1 (b, c, x); + #pragma omp target data map(to: b) + { + #pragma omp target map(tofrom: c) + #pragma omp teams num_teams(y) thread_limit(z) reduction(+:s) firstprivate(x) + #pragma omp distribute dist_schedule(static, 4) collapse(1) + for (j=0; j < x; j += y) + #pragma omp parallel for reduction(+:s) + for (i = j; i < j + y; i++) + tgt (), s += b[i] * c[i]; + #pragma omp target update from(b, tgtv) + } + return s; +} + +double +fn3 (int x) +{ + double b[1024], c[1024], s = 0; + int i; + fn1 (b, c, x); + #pragma omp target map(to: b, c) + #pragma omp parallel for reduction(+:s) + for (i = 0; i < x; i++) + tgt (), s += b[i] * c[i]; + return s; +} + +double +fn4 (int x, double *p) +{ + double b[1024], c[1024], d[1024], s = 0; + int i; + fn1 (b, c, x); + fn1 (d + x, p + x, x); + #pragma omp target map(to: b, c[0:x], d[x:x]) map(to:p[x:64 + (x & 31)]) + #pragma omp parallel for reduction(+:s) + for (i = 0; i < x; i++) + s += b[i] * c[i] + d[x + i] + p[x + i]; + return s; +} + +int +main () +{ + double a = fn2 (128, 4, 6); + int b = tgtv; + double c = fn3 (61); + #pragma omp target update from(tgtv) + int d = tgtv; + double e[1024]; + double f = fn4 (64, e); + if (a != 13888.0 || b != 6 + 128 || c != 4062.0 || d != 6 + 128 + 61 + || f != 8032.0) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/target-2.c b/libgomp/testsuite/libgomp.c/target-2.c new file mode 100644 index 0000000..ada8dad --- /dev/null +++ b/libgomp/testsuite/libgomp.c/target-2.c @@ -0,0 +1,88 @@ +extern +#ifdef __cplusplus +"C" +#endif +void abort (void); + +void +fn1 (double *x, double *y, int z) +{ + int i; + for (i = 0; i < z; i++) + { + x[i] = i & 31; + y[i] = (i & 63) - 30; + } +} + +double +fn2 (int x) +{ + double s = 0; + double b[3 * x], c[3 * x], d[3 * x], e[3 * x]; + int i; + fn1 (b, c, x); + fn1 (e, d + x, x); + #pragma omp target 
map(to: b, c[:x], d[x:x], e) + #pragma omp parallel for reduction(+:s) + for (i = 0; i < x; i++) + s += b[i] * c[i] + d[x + i] + sizeof (b) - sizeof (c); + return s; +} + +double +fn3 (int x) +{ + double s = 0; + double b[3 * x], c[3 * x], d[3 * x], e[3 * x]; + int i; + fn1 (b, c, x); + fn1 (e, d, x); + #pragma omp target + #pragma omp parallel for reduction(+:s) + for (i = 0; i < x; i++) + s += b[i] * c[i] + d[i]; + return s; +} + +double +fn4 (int x) +{ + double s = 0; + double b[3 * x], c[3 * x], d[3 * x], e[3 * x]; + int i; + fn1 (b, c, x); + fn1 (e, d + x, x); + #pragma omp target data map(from: b, c[:x], d[x:x], e) + { + #pragma omp target update to(b, c[:x], d[x:x], e) + #pragma omp target map(c[:x], d[x:x]) + #pragma omp parallel for reduction(+:s) + for (i = 0; i < x; i++) + { + s += b[i] * c[i] + d[x + i] + sizeof (b) - sizeof (c); + b[i] = i + 0.5; + c[i] = 0.5 - i; + d[x + i] = 0.5 * i; + } + } + for (i = 0; i < x; i++) + if (b[i] != i + 0.5 || c[i] != 0.5 - i || d[x + i] != 0.5 * i) + abort (); + return s; +} + +int +main () +{ + double a = fn2 (128); + if (a != 14080.0) + abort (); + double b = fn3 (128); + if (a != b) + abort (); + double c = fn4 (256); + if (c != 28160.0) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/target-3.c b/libgomp/testsuite/libgomp.c/target-3.c new file mode 100644 index 0000000..7002cf2 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/target-3.c @@ -0,0 +1,17 @@ +#include <omp.h> +#include <stdlib.h> + +int +main () +{ + if (omp_get_level ()) + abort (); + #pragma omp target if (0) + if (omp_get_level ()) + abort (); + #pragma omp target if (0) + #pragma omp teams + if (omp_get_level ()) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/target-4.c b/libgomp/testsuite/libgomp.c/target-4.c new file mode 100644 index 0000000..26e935b --- /dev/null +++ b/libgomp/testsuite/libgomp.c/target-4.c @@ -0,0 +1,14 @@ +#include <omp.h> +#include <stdlib.h> + +int +main () +{ + omp_set_dynamic (0); + #pragma omp parallel num_threads (4) + #pragma omp target if (0) + #pragma omp single + if (omp_get_num_threads () != 1) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/target-5.c b/libgomp/testsuite/libgomp.c/target-5.c new file mode 100644 index 0000000..4367443 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/target-5.c @@ -0,0 +1,83 @@ +#include <omp.h> +#include <stdlib.h> + +int +main () +{ + int d_o = omp_get_dynamic (); + int n_o = omp_get_nested (); + omp_sched_t s_o; + int c_o; + omp_get_schedule (&s_o, &c_o); + int m_o = omp_get_max_threads (); + omp_set_dynamic (1); + omp_set_nested (1); + omp_set_schedule (omp_sched_static, 2); + omp_set_num_threads (4); + int d = omp_get_dynamic (); + int n = omp_get_nested (); + omp_sched_t s; + int c; + omp_get_schedule (&s, &c); + int m = omp_get_max_threads (); + if (!omp_is_initial_device ()) + abort (); + #pragma omp target if (0) + { + omp_sched_t s_c; + int c_c; + omp_get_schedule (&s_c, &c_c); + if (d_o != omp_get_dynamic () + || n_o != omp_get_nested () + || s_o != s_c + || c_o != c_c + || m_o != omp_get_max_threads ()) + abort (); + omp_set_dynamic (0); + omp_set_nested (0); + omp_set_schedule (omp_sched_dynamic, 4); + omp_set_num_threads (2); + if (!omp_is_initial_device ()) + abort (); + } + if (!omp_is_initial_device ()) + abort (); + omp_sched_t s_c; + int c_c; + omp_get_schedule (&s_c, &c_c); + if (d != omp_get_dynamic () + || n != omp_get_nested () + || s != s_c + || c != c_c + || m != omp_get_max_threads ()) + abort (); + #pragma omp target if 
(0) + #pragma omp teams + { + omp_sched_t s_c; + int c_c; + omp_get_schedule (&s_c, &c_c); + if (d_o != omp_get_dynamic () + || n_o != omp_get_nested () + || s_o != s_c + || c_o != c_c + || m_o != omp_get_max_threads ()) + abort (); + omp_set_dynamic (0); + omp_set_nested (0); + omp_set_schedule (omp_sched_dynamic, 4); + omp_set_num_threads (2); + if (!omp_is_initial_device ()) + abort (); + } + if (!omp_is_initial_device ()) + abort (); + omp_get_schedule (&s_c, &c_c); + if (d != omp_get_dynamic () + || n != omp_get_nested () + || s != s_c + || c != c_c + || m != omp_get_max_threads ()) + abort (); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/target-6.c b/libgomp/testsuite/libgomp.c/target-6.c new file mode 100644 index 0000000..ea35aa4 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/target-6.c @@ -0,0 +1,68 @@ +#include <omp.h> +#include <stdlib.h> + +int +main () +{ + omp_set_dynamic (0); + omp_set_nested (1); + if (omp_in_parallel ()) + abort (); + #pragma omp parallel num_threads (3) + if (omp_get_thread_num () == 2) + { + if (!omp_in_parallel ()) + abort (); + #pragma omp parallel num_threads (3) + if (omp_get_thread_num () == 1) + { + if (!omp_in_parallel () + || omp_get_level () != 2 + || omp_get_ancestor_thread_num (0) != 0 + || omp_get_ancestor_thread_num (1) != 2 + || omp_get_ancestor_thread_num (2) != 1 + || omp_get_ancestor_thread_num (3) != -1) + abort (); + #pragma omp target if (0) + { + if (omp_in_parallel () + || omp_get_level () != 0 + || omp_get_ancestor_thread_num (0) != 0 + || omp_get_ancestor_thread_num (1) != -1) + abort (); + #pragma omp parallel num_threads (2) + { + if (!omp_in_parallel () + || omp_get_level () != 1 + || omp_get_ancestor_thread_num (0) != 0 + || omp_get_ancestor_thread_num (1) + != omp_get_thread_num () + || omp_get_ancestor_thread_num (2) != -1) + abort (); + } + } + #pragma omp target if (0) + { + #pragma omp teams thread_limit (2) + { + if (omp_in_parallel () + || omp_get_level () != 0 + || omp_get_ancestor_thread_num (0) != 0 + || omp_get_ancestor_thread_num (1) != -1) + abort (); + #pragma omp parallel num_threads (2) + { + if (!omp_in_parallel () + || omp_get_level () != 1 + || omp_get_ancestor_thread_num (0) != 0 + || omp_get_ancestor_thread_num (1) + != omp_get_thread_num () + || omp_get_ancestor_thread_num (2) != -1) + abort (); + } + } + } + } + } + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/target-7.c b/libgomp/testsuite/libgomp.c/target-7.c new file mode 100644 index 0000000..90de6c5 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/target-7.c @@ -0,0 +1,111 @@ +#include <omp.h> +#include <stdlib.h> + +volatile int v; + +void +foo (int f) +{ + int d = f ? 
omp_get_num_devices () : omp_get_default_device (); + int h = 5; + #pragma omp target device (d) + if (omp_get_level () != 0) + abort (); + #pragma omp target if (v > 1) + if (omp_get_level () != 0 || !omp_is_initial_device ()) + abort (); + #pragma omp target device (d) if (v > 1) + if (omp_get_level () != 0 || !omp_is_initial_device ()) + abort (); + #pragma omp target if (v <= 1) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ())) + abort (); + #pragma omp target device (d) if (v <= 1) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ())) + abort (); + #pragma omp target if (0) + if (omp_get_level () != 0 || !omp_is_initial_device ()) + abort (); + #pragma omp target device (d) if (0) + if (omp_get_level () != 0 || !omp_is_initial_device ()) + abort (); + #pragma omp target if (1) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ())) + abort (); + #pragma omp target device (d) if (1) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ())) + abort (); + #pragma omp target data device (d) map (to: h) + { + #pragma omp target device (d) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ()) || h++ != 5) + abort (); + #pragma omp target update device (d) from (h) + } + #pragma omp target data if (v > 1) map (to: h) + { + #pragma omp target if (v > 1) + if (omp_get_level () != 0 || !omp_is_initial_device () || h++ != 6) + abort (); + #pragma omp target update if (v > 1) from (h) + } + #pragma omp target data device (d) if (v > 1) map (to: h) + { + #pragma omp target device (d) if (v > 1) + if (omp_get_level () != 0 || !omp_is_initial_device () || h++ != 7) + abort (); + #pragma omp target update device (d) if (v > 1) from (h) + } + #pragma omp target data if (v <= 1) map (to: h) + { + #pragma omp target if (v <= 1) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ()) || h++ != 8) + abort (); + #pragma omp target update if (v <= 1) from (h) + } + #pragma omp target data device (d) if (v <= 1) map (to: h) + { + #pragma omp target device (d) if (v <= 1) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ()) || h++ != 9) + abort (); + #pragma omp target update device (d) if (v <= 1) from (h) + } + #pragma omp target data if (0) map (to: h) + { + #pragma omp target if (0) + if (omp_get_level () != 0 || !omp_is_initial_device () || h++ != 10) + abort (); + #pragma omp target update if (0) from (h) + } + #pragma omp target data device (d) if (0) map (to: h) + { + #pragma omp target device (d) if (0) + if (omp_get_level () != 0 || !omp_is_initial_device () || h++ != 11) + abort (); + #pragma omp target update device (d) if (0) from (h) + } + #pragma omp target data if (1) map (to: h) + { + #pragma omp target if (1) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ()) || h++ != 12) + abort (); + #pragma omp target update if (1) from (h) + } + #pragma omp target data device (d) if (1) map (to: h) + { + #pragma omp target device (d) if (1) + if (omp_get_level () != 0 || (f && !omp_is_initial_device ()) || h++ != 13) + abort (); + #pragma omp target update device (d) if (1) from (h) + } + if (h != 14) + abort (); +} + +int +main () +{ + foo (0); + foo (1); + return 0; +} diff --git a/libgomp/testsuite/libgomp.c/taskgroup-1.c b/libgomp/testsuite/libgomp.c/taskgroup-1.c new file mode 100644 index 0000000..641a3bc --- /dev/null +++ b/libgomp/testsuite/libgomp.c/taskgroup-1.c @@ -0,0 +1,83 @@ +extern +#ifdef __cplusplus +"C" +#endif +void abort (void); +int v[16] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; + 
+int
+main ()
+{
+  #pragma omp parallel num_threads (4)
+  #pragma omp single
+  {
+    int i;
+    #pragma omp taskgroup
+    {
+      for (i = 0; i < 16; i += 2)
+        #pragma omp task
+        {
+          #pragma omp task
+          v[i]++;
+          #pragma omp task
+          v[i + 1]++;
+        }
+    }
+    for (i = 0; i < 16; i++)
+      if (v[i] != i + 2)
+        abort ();
+    #pragma omp taskgroup
+    {
+      for (i = 0; i < 16; i += 2)
+        #pragma omp task
+        {
+          #pragma omp task
+          v[i]++;
+          #pragma omp task
+          v[i + 1]++;
+          #pragma omp taskwait
+        }
+    }
+    for (i = 0; i < 16; i++)
+      if (v[i] != i + 3)
+        abort ();
+    #pragma omp taskgroup
+    {
+      for (i = 0; i < 16; i += 2)
+        #pragma omp task
+        {
+          #pragma omp task
+          v[i]++;
+          v[i + 1]++;
+        }
+      #pragma omp taskwait
+      for (i = 0; i < 16; i += 2)
+        #pragma omp task
+        v[i + 1]++;
+    }
+    for (i = 0; i < 16; i++)
+      if (v[i] != i + 4 + (i & 1))
+        abort ();
+    #pragma omp taskgroup
+    {
+      for (i = 0; i < 16; i += 2)
+        {
+          #pragma omp taskgroup
+          {
+            #pragma omp task
+            v[i]++;
+            #pragma omp task
+            v[i + 1]++;
+          }
+          if (v[i] != i + 5 || v[i + 1] != i + 7)
+            abort ();
+          #pragma omp task
+          v[i]++;
+        }
+    }
+    for (i = 0; i < 16; i++)
+      if (v[i] != i + 6)
+        abort ();
+  }
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/thread-limit-1.c b/libgomp/testsuite/libgomp.c/thread-limit-1.c
new file mode 100644
index 0000000..6cc716b
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/thread-limit-1.c
@@ -0,0 +1,41 @@
+/* { dg-do run } */
+/* { dg-set-target-env-var OMP_THREAD_LIMIT "6" } */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+int
+main ()
+{
+  if (omp_get_thread_limit () != 6)
+    return 0;
+  omp_set_dynamic (0);
+  omp_set_nested (1);
+  #pragma omp parallel num_threads (3)
+  if (omp_get_num_threads () != 3)
+    abort ();
+  #pragma omp parallel num_threads (3)
+  if (omp_get_num_threads () != 3)
+    abort ();
+  #pragma omp parallel num_threads (8)
+  if (omp_get_num_threads () > 6)
+    abort ();
+  #pragma omp parallel num_threads (6)
+  if (omp_get_num_threads () != 6)
+    abort ();
+  int cnt = 0;
+  #pragma omp parallel num_threads (5)
+  #pragma omp parallel num_threads (5)
+  #pragma omp parallel num_threads (2)
+  {
+    int v;
+    #pragma omp atomic capture
+    v = ++cnt;
+    if (v > 6)
+      abort ();
+    usleep (10000);
+    #pragma omp atomic
+    --cnt;
+  }
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/thread-limit-2.c b/libgomp/testsuite/libgomp.c/thread-limit-2.c
new file mode 100644
index 0000000..0fc9dae
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/thread-limit-2.c
@@ -0,0 +1,57 @@
+/* { dg-do run } */
+/* { dg-set-target-env-var OMP_THREAD_LIMIT "9" } */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+int
+main ()
+{
+  if (omp_get_thread_limit () != 9)
+    return 0;
+  omp_set_dynamic (0);
+  #pragma omp parallel num_threads (8)
+  if (omp_get_num_threads () != 8)
+    abort ();
+  #pragma omp parallel num_threads (16)
+  if (omp_get_num_threads () > 9)
+    abort ();
+  #pragma omp target if (0)
+  #pragma omp teams thread_limit (6)
+  {
+    if (omp_get_thread_limit () > 6)
+      abort ();
+    if (omp_get_thread_limit () == 6)
+      {
+        omp_set_dynamic (0);
+        omp_set_nested (1);
+        #pragma omp parallel num_threads (3)
+        if (omp_get_num_threads () != 3)
+          abort ();
+        #pragma omp parallel num_threads (3)
+        if (omp_get_num_threads () != 3)
+          abort ();
+        #pragma omp parallel num_threads (8)
+        if (omp_get_num_threads () > 6)
+          abort ();
+        #pragma omp parallel num_threads (6)
+        if (omp_get_num_threads () != 6)
+          abort ();
+        int cnt = 0;
+        #pragma omp parallel num_threads (5)
+        #pragma omp parallel num_threads (5)
+        #pragma omp parallel num_threads (2)
+        {
+          int v;
+          #pragma omp atomic capture
+          v = ++cnt;
+          if (v > 6)
+            abort ();
+          usleep (10000);
+          #pragma omp atomic
+          --cnt;
+        }
+      }
+  }
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/thread-limit-3.c b/libgomp/testsuite/libgomp.c/thread-limit-3.c
new file mode 100644
index 0000000..af9bd78
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/thread-limit-3.c
@@ -0,0 +1,12 @@
+#include <stdlib.h>
+#include <omp.h>
+
+int
+main ()
+{
+  #pragma omp target if (0)
+  #pragma omp teams thread_limit (1)
+  if (omp_get_thread_limit () != 1)
+    abort ();
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/udr-1.c b/libgomp/testsuite/libgomp.c/udr-1.c
new file mode 100644
index 0000000..ea9da72
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/udr-1.c
@@ -0,0 +1,81 @@
+/* { dg-do run } */
+
+extern
+#ifdef __cplusplus
+"C"
+#endif
+void abort ();
+
+struct S { int s; struct S *t; };
+
+void
+foo (struct S *out, struct S *in)
+{
+  out->s += in->s;
+}
+
+void
+bar (struct S *x)
+{
+  if (x->s != 6) abort ();
+  x->s = 15;
+}
+
+void
+baz (struct S *x, struct S *y)
+{
+  x->s = 6;
+  x->t = x;
+  (void) y;
+}
+
+#pragma omp declare reduction (foo: struct S: foo (&omp_out, &omp_in)) \
+  initializer (omp_priv = { 8, &omp_priv })
+#pragma omp declare reduction (foo: char, int, short: omp_out += omp_in - 4) \
+  initializer (omp_priv = 4)
+#pragma omp declare reduction (+: struct S: foo (&omp_out, &omp_in)) \
+  initializer (baz (&omp_priv, &omp_orig))
+
+void
+test (struct S s, struct S t)
+{
+  int q = 0;
+  #pragma omp parallel num_threads (4) reduction (+: s, q) reduction (foo: t)
+  {
+    if (s.s != 6 || s.t != &s || t.s != 8 || t.t != &t)
+      abort ();
+    s.s = 2;
+    t.s = 3;
+    q = 1;
+  }
+  if (s.s != 12 + 2 * q || t.s != 14 + 3 * q)
+    abort ();
+}
+
+int
+main ()
+{
+  struct S s, t;
+  s.s = 9; t.s = 10;
+  int h = 30, v = 2, q = 0;
+  #pragma omp declare reduction (foo: struct S: omp_out.s *= omp_in.s) \
+    initializer (omp_priv = omp_orig)
+  {
+    #pragma omp declare reduction (foo: struct S: omp_out.s += omp_in.s) \
+      initializer (omp_priv = omp_orig)
+    #pragma omp parallel num_threads (4) reduction (+: t, q) \
+      reduction (min: h) reduction (foo: s, v)
+    {
+      if (s.s != 9 || t.s != 6 || v != 4 || h != __INT_MAX__) abort ();
+      asm volatile ("" : "+m" (s.s), "+m" (t.s));
+      asm volatile ("" : "+r" (h), "+r" (v));
+      h = t.s; s.s++; t.s++; v++; q++;
+    }
+  }
+  if (h != 6 || s.s != 9 + q * 10 || t.s != 10 + q * 7 || v != 2 + q)
+    abort ();
+  s.s = 12;
+  t.s = 14;
+  test (s, t);
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/udr-2.c b/libgomp/testsuite/libgomp.c/udr-2.c
new file mode 100644
index 0000000..b58b5c7
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/udr-2.c
@@ -0,0 +1,27 @@
+/* { dg-do run } */
+
+extern void abort ();
+
+struct S { int s; };
+
+#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
+#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
+#pragma omp declare reduction (foo:int:omp_out += omp_in)
+
+int
+main ()
+{
+  int u = 0, q = 0;
+  struct S s, t;
+  s.s = 0; t.s = 0;
+  #pragma omp parallel reduction(+:s, q) reduction(foo:t, u)
+  {
+    if (s.s != 0 || t.s != 0 || u != 0 || q != 0) abort ();
+    s.s = 6;
+    t.s = 8;
+    u = 9;
+    q++;
+  }
+  if (s.s != 6 * q || t.s != 8 * q || u != 9 * q) abort ();
+  return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/udr-3.c b/libgomp/testsuite/libgomp.c/udr-3.c
new file mode 100644
index 0000000..e0a5b87
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/udr-3.c
@@ -0,0 +1,32 @@
+/* { dg-do run } */
+
+extern void abort ();
+
+struct S;
+void foo (struct S *, struct S *);
+#pragma omp declare reduction (+:struct S:foo (&omp_out, &omp_in))
+struct S { int s; };
+
+void
+foo (struct S *x, struct S *y)
+{
+  x->s += y->s;
+}
+
+int
+main ()
+{
+  struct S s;
+  int i = 0;
+  s.s = 0;
+  #pragma omp parallel reduction (+:s, i)
+  {
+    if (s.s != 0)
+      abort ();
+    s.s = 2;
+    i = 1;
+  }
+  if (s.s != 2 * i)
+    abort ();
+  return 0;
+}