aboutsummaryrefslogtreecommitdiff
path: root/gcc/omp-low.c
diff options
context:
space:
mode:
authorJakub Jelinek <jakub@redhat.com>2018-11-08 18:13:04 +0100
committerJakub Jelinek <jakub@gcc.gnu.org>2018-11-08 18:13:04 +0100
commit28567c40e2c7c88e424283e8a1a6ff8cb7ba440c (patch)
treed35b0640fcabbdcf16b6254688aa76a6fbc4f4a7 /gcc/omp-low.c
parentdfe2a550e8879f9af11e8e2c7c4eb10ea626a7e6 (diff)
downloadgcc-28567c40e2c7c88e424283e8a1a6ff8cb7ba440c.zip
gcc-28567c40e2c7c88e424283e8a1a6ff8cb7ba440c.tar.gz
gcc-28567c40e2c7c88e424283e8a1a6ff8cb7ba440c.tar.bz2
builtin-types.def (BT_FN_VOID_BOOL, [...]): New.
* builtin-types.def (BT_FN_VOID_BOOL, BT_FN_VOID_SIZE_SIZE_PTR, BT_FN_UINT_UINT_PTR_PTR, BT_FN_UINT_OMPFN_PTR_UINT_UINT, BT_FN_BOOL_UINT_LONGPTR_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR, BT_FN_BOOL_UINT_ULLPTR_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR, BT_FN_BOOL_LONG_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR): New. * gengtype.c (open_base_files): Add omp-general.h. * gimple.c (gimple_build_omp_critical): (gimple_build_omp_taskgroup): Add CLAUSES argument. Call gimple_omp_taskgroup_set_clauses. (gimple_build_omp_atomic_load): Add mo argument, call gimple_omp_atomic_set_memory_order. (gimple_build_omp_atomic_store): Likewise. (gimple_copy): Adjust handling of GIMPLE_OMP_TASKGROUP. * gimple.def (GIMPLE_OMP_TASKGROUP): Use GSS_OMP_SINGLE_LAYOUT instead of GSS_OMP. (GIMPLE_OMP_TEAMS): Use GSS_OMP_PARALLEL_LAYOUT instead of GSS_OMP_SINGLE_LAYOUT, adjust comments. * gimple.h (enum gf_mask): Add GF_OMP_TEAMS_HOST, GF_OMP_TASK_TASKWAIT and GF_OMP_ATOMIC_MEMORY_ORDER. Remove GF_OMP_ATOMIC_SEQ_CST, use different value for GF_OMP_ATOMIC_NEED_VALUE. (struct gimple_statement_omp_taskreg): Add GIMPLE_OMP_TEAMS to comments. (struct gimple_statement_omp_single_layout): And remove here. (struct gomp_teams): Inherit from gimple_statement_omp_taskreg rather than gimple_statement_omp_single_layout. (is_a_helper <gimple_statement_omp_taskreg *>::test): Allow GIMPLE_OMP_TEAMS. (is_a_helper <const gimple_statement_omp_taskreg *>::test): Likewise. (gimple_omp_subcode): Formatting fix. (gimple_omp_teams_child_fn, gimple_omp_teams_child_fn_ptr, gimple_omp_teams_set_child_fn, gimple_omp_teams_data_arg, gimple_omp_teams_data_arg_ptr, gimple_omp_teams_set_data_arg, gimple_omp_teams_host, gimple_omp_teams_set_host, gimple_omp_task_taskwait_p, gimple_omp_task_set_taskwait_p, gimple_omp_taskgroup_clauses, gimple_omp_taskgroup_clauses_ptr, gimple_omp_taskgroup_set_clauses): New inline functions. 
(gimple_build_omp_atomic_load): Add enum omp_memory_order argument. (gimple_build_omp_atomic_store): Likewise. (gimple_omp_atomic_seq_cst_p): Remove. (gimple_omp_atomic_memory_order): New function. (gimple_omp_atomic_set_seq_cst): Remove. (gimple_omp_atomic_set_memory_order): New function. (gimple_build_omp_taskgroup): Add clauses argument. * gimple-pretty-print.c (dump_gimple_omp_taskgroup): New function. (dump_gimple_omp_task): Print taskwait with depend clauses. (dump_gimple_omp_atomic_load, dump_gimple_omp_atomic_store): Use dump_omp_atomic_memory_order. (pp_gimple_stmt_1): Handle GIMPLE_OMP_TASKGROUP. * gimplify.c (enum gimplify_omp_var_data): Add GOVD_MAP_ALLOC_ONLY, GOVD_MAP_FROM_ONLY and GOVD_NONTEMPORAL. (enum omp_region_type): Reserve bits 1 and 2 for auxiliary flags, renumber values of most of ORT_* enumerators, add ORT_HOST_TEAMS, ORT_COMBINED_HOST_TEAMS, ORT_TASKGROUP, ORT_TASKLOOP and ORT_UNTIED_TASKLOOP enumerators. (enum gimplify_defaultmap_kind): New. (struct gimplify_omp_ctx): Remove target_map_scalars_firstprivate and target_map_pointers_as_0len_arrays members, add defaultmap. (new_omp_context): Initialize defaultmap member. (gimple_add_tmp_var): Handle ORT_TASKGROUP like ORT_WORKSHARE. (maybe_fold_stmt): Don't fold even in host teams regions. (omp_firstprivatize_variable): Handle ORT_TASKGROUP like ORT_WORKSHARE. Test ctx->defaultmap[GDMK_SCALAR] instead of ctx->omp_firstprivatize_variable. (omp_add_variable): Don't add private/firstprivate for VLAs in ORT_TASKGROUP. (omp_default_clause): Print "taskloop" rather than "task" if ORT_*TASKLOOP. (omp_notice_variable): Handle ORT_TASKGROUP like ORT_WORKSHARE. Handle new defaultmap clause kinds. (omp_is_private): Handle ORT_TASKGROUP like ORT_WORKSHARE. Allow simd iterator to be lastprivate or private. Fix up diagnostics if linear is used on collapse>1 simd iterator. (omp_check_private): Handle ORT_TASKGROUP like ORT_WORKSHARE. (gimplify_omp_depend): New function. 
(gimplify_scan_omp_clauses): Add shared clause on parallel for combined parallel master taskloop{, simd} if taskloop has firstprivate, lastprivate or reduction clause. Handle OMP_CLAUSE_REDUCTION_TASK diagnostics. Adjust tests for ORT_COMBINED_TEAMS. Gimplify depend clauses with iterators. Handle cancel and simd OMP_CLAUSE_IF_MODIFIERs. Handle OMP_CLAUSE_NONTEMPORAL. Handle new defaultmap clause kinds. Handle OMP_CLAUSE_{TASK,IN}_REDUCTION. Diagnose invalid conditional lastprivate. (gimplify_adjust_omp_clauses_1): Ignore GOVD_NONTEMPORAL. Handle GOVD_MAP_ALLOC_ONLY and GOVD_MAP_FROM_ONLY. (gimplify_adjust_omp_clauses): Handle OMP_CLAUSE_NONTEMPORAL. Handle OMP_CLAUSE_{TASK,IN}_REDUCTION. (gimplify_omp_task): Handle taskwait with depend clauses. (gimplify_omp_for): Add shared clause on parallel for combined parallel master taskloop{, simd} if taskloop has firstprivate, lastprivate or reduction clause. Use ORT_TASKLOOP or ORT_UNTIED_TASKLOOP instead of ORT_TASK or ORT_UNTIED_TASK. Adjust tests for ORT_COMBINED_TEAMS. Handle C++ range for loops with NULL TREE_PURPOSE in OMP_FOR_ORIG_DECLS. Firstprivatize __for_end and __for_range temporaries on OMP_PARALLEL for distribute parallel for{, simd}. Move OMP_CLAUSE_REDUCTION and OMP_CLAUSE_IN_REDUCTION from taskloop to the task construct sandwiched in between two taskloops. (computable_teams_clause): Test ctx->defaultmap[GDMK_SCALAR] instead of ctx->omp_firstprivatize_variable. (gimplify_omp_workshare): Set ort to ORT_HOST_TEAMS or ORT_COMBINED_HOST_TEAMS if not inside of target construct. If host teams, use gimplify_and_return_first etc. for body like for target or target data constructs, and at the end call gimple_omp_teams_set_host on the GIMPLE_OMP_TEAMS object. (gimplify_omp_atomic): Use OMP_ATOMIC_MEMORY_ORDER instead of OMP_ATOMIC_SEQ_CST, pass it as new argument to gimple_build_omp_atomic_load and gimple_build_omp_atomic_store, remove gimple_omp_atomic_set_seq_cst calls. 
(gimplify_expr) <case OMP_TASKGROUP>: Move handling into a separate case, handle taskgroup clauses. * lto-streamer-out.c (hash_tree): Handle OMP_CLAUSE_{TASK,IN}_REDUCTION. * Makefile.in (GTFILES): Add omp-general.h. * omp-builtins.def (BUILT_IN_GOMP_TASKWAIT_DEPEND, BUILT_IN_GOMP_LOOP_NONMONOTONIC_RUNTIME_START, BUILT_IN_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START, BUILT_IN_GOMP_LOOP_START, BUILT_IN_GOMP_LOOP_ORDERED_START, BUILT_IN_GOMP_LOOP_DOACROSS_START, BUILT_IN_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT, BUILT_IN_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT, BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START, BUILT_IN_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START, BUILT_IN_GOMP_LOOP_ULL_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_START, BUILT_IN_GOMP_LOOP_ULL_DOACROSS_START, BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT, BUILT_IN_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT, BUILT_IN_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME, BUILT_IN_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME, BUILT_IN_GOMP_PARALLEL_REDUCTIONS, BUILT_IN_GOMP_SECTIONS2_START, BUILT_IN_GOMP_TEAMS_REG, BUILT_IN_GOMP_TASKGROUP_REDUCTION_REGISTER, BUILT_IN_GOMP_TASKGROUP_REDUCTION_UNREGISTER, BUILT_IN_GOMP_TASK_REDUCTION_REMAP, BUILT_IN_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER): New builtins. * omp-expand.c (workshare_safe_to_combine_p): Return false for non-worksharing loops. (omp_adjust_chunk_size): Don't adjust anything if chunk_size is zero. (determine_parallel_type): Don't combine parallel with worksharing which has _reductemp_ clause. (expand_parallel_call): Emit the GOMP_*nonmonotonic_runtime* or GOMP_*maybe_nonmonotonic_runtime* builtins instead of GOMP_*runtime* if there is nonmonotonic modifier or if there is no modifier and no ordered clause. For dynamic and guided schedule without monotonic and nonmonotonic modifier, default to nonmonotonic. (expand_omp_for): Likewise. Adjust expand_omp_for_generic caller, use GOMP_loop{,_ull}{,_ordered,_doacross}_start builtins if there are task reductions. 
(expand_task_call): Add GOMP_TASK_FLAG_REDUCTION flag to flags if there are any reduction clauses. (expand_taskwait_call): New function. (expand_teams_call): New function. (expand_omp_taskreg): Allow GIMPLE_OMP_TEAMS and call expand_teams_call for it. Formatting fix. Handle taskwait with depend clauses. (expand_omp_for_generic): Add SCHED_ARG argument. Handle expansion of worksharing loops with task reductions. (expand_omp_for_static_nochunk, expand_omp_for_static_chunk): Handle expansion of worksharing loops with task reductions. (expand_omp_sections): Handle expansion of sections with task reductions. (expand_omp_synch): For host teams call expand_omp_taskreg. (omp_memory_order_to_memmodel): New function. (expand_omp_atomic_load, expand_omp_atomic_store, expand_omp_atomic_fetch_op): Use it and gimple_omp_atomic_memory_order instead of gimple_omp_atomic_seq_cst_p. (build_omp_regions_1, omp_make_gimple_edges): Treat taskwait with depend clauses as a standalone directive. * omp-general.c (enum omp_requires): New variable. (omp_extract_for_data): Initialize have_reductemp member. Allow NE_EXPR even in OpenMP loops, transform them into LT_EXPR or GT_EXPR loops depending on incr sign. Formatting fixes. * omp-general.h (struct omp_for_data): Add have_reductemp member. (enum omp_requires): New enum. (omp_requires_mask): Declare. * omp-grid.c (grid_eliminate_combined_simd_part): Formatting fix. Fix comment typos. * omp-low.c (struct omp_context): Add task_reductions and task_reduction_map fields. (is_host_teams_ctx): New function. (is_taskreg_ctx): Return true also if is_host_teams_ctx. (use_pointer_for_field): Use is_global_var instead of TREE_STATIC || DECL_EXTERNAL, and apply only if not privatized in outer contexts. (build_outer_var_ref): Ignore taskgroup outer contexts. (delete_omp_context): Release task_reductions and task_reduction_map. (scan_sharing_clauses): Don't add any fields for reduction clause on taskloop. Handle OMP_CLAUSE__REDUCTEMP_. 
Handle OMP_CLAUSE_{IN,TASK}_REDUCTION and OMP_CLAUSE_REDUCTION with task modifier. Don't ignore shared clauses in is_host_teams_ctx contexts. Handle OMP_CLAUSE_NONTEMPORAL. (add_taskreg_looptemp_clauses): Add OMP_CLAUSE__REDUCTEMP_ clause if needed. (scan_omp_parallel): Add _reductemp_ clause if there are any reduction clauses with task modifier. (scan_omp_task): Handle taskwait with depend clauses. (finish_taskreg_scan): Move field corresponding to _reductemp_ clause first. Move also OMP_CLAUSE__REDUCTEMP_ clause in front if present. Handle GIMPLE_OMP_TEAMS like GIMPLE_OMP_PARALLEL. (scan_omp_for): Fix comment formatting. (scan_omp_teams): Handle host teams constructs. (check_omp_nesting_restrictions): Allow teams with no outer OpenMP context. Adjust diagnostics for teams strictly nested into some explicit OpenMP construct other than target. Allow OpenMP atomics inside of simd regions. (scan_omp_1_stmt): Call scan_sharing_clauses for taskgroups. (scan_omp_1_stmt) <case GIMPLE_OMP_TEAMS>: Temporarily bump taskreg_nesting_level while scanning host teams construct. (task_reduction_read): New function. (lower_rec_input_clauses): Handle OMP_CLAUSE_REDUCTION on taskloop construct. Handle OMP_CLAUSE_IN_REDUCTION and OMP_CLAUSE__REDUCTEMP_ clauses. Handle OMP_CLAUSE_REDUCTION with task modifier. Remove second argument create_tmp_var if it is NULL. Don't ignore shared clauses in is_host_teams_ctx contexts. Handle OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE on OMP_CLAUSE_FIRSTPRIVATE clauses. (lower_reduction_clauses): Ignore reduction clauses with task modifier. Remove second argument create_tmp_var if it is NULL. Initialize OMP_ATOMIC_MEMORY_ORDER to relaxed. (lower_send_clauses): Ignore reduction clauses with task modifier. Handle OMP_CLAUSE__REDUCTEMP_. Don't send anything for OMP_CLAUSE_REDUCTION on taskloop. Handle OMP_CLAUSE_IN_REDUCTION. (maybe_add_implicit_barrier_cancel): Add OMP_RETURN argument, don't rely that it is the last stmt in body so far. 
Ignore outer taskgroup contexts. (omp_task_reductions_find_first, omp_task_reduction_iterate, lower_omp_task_reductions): New functions. (lower_omp_sections): Handle reduction clauses with taskgroup modifiers. Adjust maybe_add_implicit_barrier_cancel caller. (lower_omp_single): Adjust maybe_add_implicit_barrier_cancel caller. (lower_omp_for): Likewise. Handle reduction clauses with taskgroup modifiers. (lower_omp_taskgroup): Handle taskgroup reductions. (create_task_copyfn): Copy over OMP_CLAUSE__REDUCTEMP_ pointer. Handle OMP_CLAUSE_IN_REDUCTION and OMP_CLAUSE_REDUCTION clauses. (lower_depend_clauses): If there are any OMP_CLAUSE_DEPEND_DEPOBJ or OMP_CLAUSE_DEPEND_MUTEXINOUTSET depend clauses, use a new array format. If OMP_CLAUSE_DEPEND_LAST is seen, assume lowering is done already and return early. Set kind on artificial depend clause to OMP_CLAUSE_DEPEND_LAST. (lower_omp_taskreg): Handle reduction clauses with task modifier on parallel construct. Handle reduction clause on taskloop construct. Handle taskwait with depend clauses. (lower_omp_1): Use lower_omp_taskreg instead of lower_omp_teams for host teams constructs. * tree.c (omp_clause_num_ops): Add in_reduction, task_reduction, nontemporal and _reductemp_ clause entries. (omp_clause_code_name): Likewise. (walk_tree_1): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION, OMP_CLAUSE_NONTEMPORAL and OMP_CLAUSE__REDUCTEMP_. * tree-core.h (enum omp_clause_code): Add OMP_CLAUSE_{{IN,TASK}_REDUCTION,NONTEMPORAL,_REDUCTEMP_}. (enum omp_clause_defaultmap_kind, enum omp_memory_order): New. (struct tree_base): Add omp_atomic_memory_order field into union. Remove OMP_ATOMIC_SEQ_CST comment. (enum omp_clause_depend_kind): Add OMP_CLAUSE_DEPEND_MUTEXINOUTSET and OMP_CLAUSE_DEPEND_DEPOBJ. (struct tree_omp_clause): Add subcode.defaultmap_kind. * tree.def (OMP_TASKGROUP): Add another operand, move next to other OpenMP constructs with body and clauses operands. * tree.h (OMP_BODY): Use OMP_MASTER instead of OMP_TASKGROUP. 
(OMP_CLAUSES): Use OMP_TASKGROUP instead of OMP_SINGLE. (OMP_TASKGROUP_CLAUSES): Define. (OMP_CLAUSE_DECL): Use OMP_CLAUSE__REDUCTEMP_ instead of OMP_CLAUSE__LOOPTEMP_. (OMP_ATOMIC_SEQ_CST): Remove. (OMP_ATOMIC_MEMORY_ORDER, OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE, OMP_CLAUSE_LASTPRIVATE_CONDITIONAL): Define. (OMP_CLAUSE_REDUCTION_CODE, OMP_CLAUSE_REDUCTION_INIT, OMP_CLAUSE_REDUCTION_MERGE, OMP_CLAUSE_REDUCTION_PLACEHOLDER, OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER, OMP_CLAUSE_REDUCTION_OMP_ORIG_REF): Handle OMP_CLAUSE_{,IN_,TASK_}REDUCTION. (OMP_CLAUSE_REDUCTION_TASK, OMP_CLAUSE_REDUCTION_INSCAN, OMP_CLAUSE_DEFAULTMAP_KIND, OMP_CLAUSE_DEFAULTMAP_CATEGORY, OMP_CLAUSE_DEFAULTMAP_BEHAVIOR, OMP_CLAUSE_DEFAULTMAP_SET_KIND): Define. * tree-inline.c (remap_gimple_stmt): Remap taskgroup clauses. * tree-nested.c (convert_nonlocal_omp_clauses): Handle OMP_CLAUSE__REDUCTEMP_, OMP_CLAUSE_NONTEMPORAL. (convert_local_omp_clauses): Likewise. Remove useless test. * tree-parloops.c (create_call_for_reduction_1): Pass OMP_MEMORY_ORDER_RELAXED as new argument to dump_gimple_omp_atomic_load and dump_gimple_omp_atomic_store. * tree-pretty-print.c (dump_omp_iterators): New function. (dump_omp_clause): Handle OMP_CLAUSE__REDUCTEMP_, OMP_CLAUSE_NONTEMPORAL, OMP_CLAUSE_{TASK,IN}_REDUCTION. Print reduction modifiers. Handle OMP_CLAUSE_DEPEND_DEPOBJ and OMP_CLAUSE_DEPEND_MUTEXINOUTSET. Print iterators in depend clauses. Print __internal__ for OMP_CLAUSE_DEPEND_LAST. Handle cancel and simd OMP_CLAUSE_IF_MODIFIERs. Handle new kinds of OMP_CLAUSE_DEFAULTMAP. Print conditional: for OMP_CLAUSE_LASTPRIVATE_CONDITIONAL. (dump_omp_atomic_memory_order): New function. (dump_generic_node): Use it. Print taskgroup clauses. Print taskwait with depend clauses. * tree-pretty-print.h (dump_omp_atomic_memory_order): Declare. * tree-streamer-in.c (unpack_ts_omp_clause_value_fields): Handle OMP_CLAUSE_{TASK,IN}_REDUCTION. 
* tree-streamer-out.c (pack_ts_omp_clause_value_fields, write_ts_omp_clause_tree_pointers): Likewise. gcc/c-family/ * c-common.h (c_finish_omp_taskgroup): Add CLAUSES argument. (c_finish_omp_atomic): Replace bool SEQ_CST argument with enum omp_memory_order MEMORY_ORDER. (c_finish_omp_flush): Add MO argument. (c_omp_depend_t_p, c_finish_omp_depobj): Declare. (c_finish_omp_for): Add FINAL_P argument. * c-omp.c: Include memmodel.h. (c_finish_omp_taskgroup): Add CLAUSES argument. Set OMP_TASKGROUP_CLAUSES to it. (c_finish_omp_atomic): Replace bool SEQ_CST argument with enum omp_memory_order MEMORY_ORDER. Set OMP_ATOMIC_MEMORY_ORDER instead of OMP_ATOMIC_SEQ_CST. (c_omp_depend_t_p, c_finish_omp_depobj): New functions. (c_finish_omp_flush): Add MO argument, if not MEMMODEL_LAST, emit __atomic_thread_fence call with the given value. (check_omp_for_incr_expr): Formatting fixes. (c_finish_omp_for): Add FINAL_P argument. Allow NE_EXPR even in OpenMP loops, diagnose if NE_EXPR and incr expression is not constant expression 1 or -1. Transform NE_EXPR loops with iterators pointers to VLA into LT_EXPR or GT_EXPR loops. (c_omp_check_loop_iv_r): Look for orig decl of C++ range for loops too. (c_omp_split_clauses): Add support for combined #pragma omp parallel master and #pragma omp {,parallel }master taskloop{, simd} constructs. Handle OMP_CLAUSE_IN_REDUCTION. Handle OMP_CLAUSE_REDUCTION_TASK. Handle OMP_CLAUSE_NONTEMPORAL. Handle splitting OMP_CLAUSE_IF also to OMP_SIMD. Copy OMP_CLAUSE_LASTPRIVATE_CONDITIONAL. (c_omp_predetermined_sharing): Don't return OMP_CLAUSE_DEFAULT_SHARED for const qualified decls. * c-pragma.c (omp_pragmas): Add PRAGMA_OMP_DEPOBJ and PRAGMA_OMP_REQUIRES. * c-pragma.h (enum pragma_kind): Likewise. (enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_NONTEMPORAL and PRAGMA_OMP_CLAUSE_{IN,TASK}_REDUCTION. gcc/c/ * c-parser.c: Include memmodel.h. (c_parser_omp_depobj, c_parser_omp_requires): New functions. 
(c_parser_pragma): Handle PRAGMA_OMP_DEPOBJ and PRAGMA_OMP_REQUIRES. (c_parser_omp_clause_name): Handle nontemporal, in_reduction and task_reduction clauses. (c_parser_omp_variable_list): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION. For OMP_CLAUSE_DEPEND, parse clause operands as either an array section, or lvalue assignment expression. (c_parser_omp_clause_if): Handle cancel and simd modifiers. (c_parser_omp_clause_lastprivate): Parse optional conditional: modifier. (c_parser_omp_clause_hint): Require constant integer expression rather than just integer expression. (c_parser_omp_clause_defaultmap): Parse new kinds of defaultmap clause. (c_parser_omp_clause_reduction): Add IS_OMP and KIND arguments. Parse reduction modifiers. Pass KIND to c_parser_omp_variable_list. (c_parser_omp_clause_nontemporal, c_parser_omp_iterators): New functions. (c_parser_omp_clause_depend): Parse iterator modifier and handle iterators. Parse mutexinoutset and depobj kinds. (c_parser_oacc_all_clauses): Adjust c_parser_omp_clause_reduction callers. (c_parser_omp_all_clauses): Likewise. Handle PRAGMA_OMP_CLAUSE_NONTEMPORAL and PRAGMA_OMP_CLAUSE_{IN,TASK}_REDUCTION. (c_parser_omp_atomic): Parse hint and memory order clauses. Handle default memory order from requires directive if any. Adjust c_finish_omp_atomic caller. (c_parser_omp_critical): Allow comma in between (name) and hint clause. (c_parser_omp_flush): Parse flush with memory-order-clause. (c_parser_omp_for_loop): Allow NE_EXPR even in OpenMP loops, adjust c_finish_omp_for caller. (OMP_SIMD_CLAUSE_MASK): Add if and nontemporal clauses. (c_parser_omp_master): Add p_name, mask and cclauses arguments. Allow to be called while parsing combined parallel master. Parse combined master taskloop{, simd}. (c_parser_omp_parallel): Parse combined parallel master{, taskloop{, simd}} constructs. (OMP_TASK_CLAUSE_MASK): Add in_reduction clause. (OMP_TASKGROUP_CLAUSE_MASK): Define. (c_parser_omp_taskgroup): Add LOC argument. Parse taskgroup clauses. 
(OMP_TASKWAIT_CLAUSE_MASK): Define. (c_parser_omp_taskwait): Handle taskwait with depend clauses. (c_parser_omp_teams): Force a BIND_EXPR with BLOCK around teams body. Use SET_EXPR_LOCATION. (c_parser_omp_target_data): Allow target data with only use_device_ptr clauses. (c_parser_omp_target): Use SET_EXPR_LOCATION. Set OMP_REQUIRES_TARGET_USED bit in omp_requires_mask. (c_parser_omp_requires): New function. (c_finish_taskloop_clauses): New function. (OMP_TASKLOOP_CLAUSE_MASK): Add reduction and in_reduction clauses. (c_parser_omp_taskloop): Use c_finish_taskloop_clauses. Add forward declaration. Disallow in_reduction clause when combined with parallel master. (c_parser_omp_construct): Adjust c_parser_omp_master and c_parser_omp_taskgroup callers. * c-typeck.c (c_finish_omp_cancel): Diagnose if clause with modifier other than cancel. (handle_omp_array_sections_1): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION like OMP_CLAUSE_REDUCTION. (handle_omp_array_sections): Likewise. Call save_expr on array reductions before calling build_index_type. Handle depend clauses with iterators. (struct c_find_omp_var_s): New type. (c_find_omp_var_r, c_omp_finish_iterators): New functions. (c_finish_omp_clauses): Don't diagnose nonmonotonic clause with static, runtime or auto schedule kinds. Call save_expr for whole array reduction sizes. Diagnose reductions with zero sized elements or variable length structures. Diagnose nogroup clause used with reduction clause(s). Handle depend clause with OMP_CLAUSE_DEPEND_DEPOBJ. Diagnose bit-fields. Require omp_depend_t type for OMP_CLAUSE_DEPEND_DEPOBJ kinds and some different type for other kinds. Use build_unary_op with ADDR_EXPR and build_indirect_ref instead of c_mark_addressable. Handle depend clauses with iterators. Remove no longer needed special case that predetermined const qualified vars may be specified in firstprivate clause. Complain if const qualified vars are mentioned in data-sharing clauses other than firstprivate or shared. 
Use error_at with OMP_CLAUSE_LOCATION (c) as first argument instead of error. Formatting fix. Handle OMP_CLAUSE_NONTEMPORAL and OMP_CLAUSE_{IN,TASK}_REDUCTION. Allow any lvalue as OMP_CLAUSE_DEPEND operand (besides array section), adjust diagnostics. gcc/cp/ * constexpr.c (potential_constant_expression_1): Handle OMP_DEPOBJ. * cp-gimplify.c (cp_genericize_r): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION. (cxx_omp_predetermined_sharing_1): Don't return OMP_CLAUSE_DEFAULT_SHARED for const qualified decls with no mutable member. Return OMP_CLAUSE_DEFAULT_FIRSTPRIVATE for this pointer. * cp-objcp-common.c (cp_common_init_ts): Handle OMP_DEPOBJ. * cp-tree.def (OMP_DEPOBJ): New tree code. * cp-tree.h (OMP_ATOMIC_DEPENDENT_P): Return true also for first argument being OMP_CLAUSE. (OMP_DEPOBJ_DEPOBJ, OMP_DEPOBJ_CLAUSES): Define. (cp_convert_omp_range_for, cp_finish_omp_range_for): Declare. (finish_omp_atomic): Add LOC, CLAUSES and MO arguments. Remove SEQ_CST argument. (finish_omp_for_block): Declare. (finish_omp_flush): Add MO argument. (finish_omp_depobj): Declare. * cxx-pretty-print.c (cxx_pretty_printer::statement): Handle OMP_DEPOBJ. * dump.c (cp_dump_tree): Likewise. * lex.c (cxx_init): Likewise. * parser.c: Include memmodel.h. (cp_parser_for): Pass false as new is_omp argument to cp_parser_range_for. (cp_parser_range_for): Add IS_OMP argument, return before finalizing if it is true. (cp_parser_omp_clause_name): Handle nontemporal, in_reduction and task_reduction clauses. (cp_parser_omp_var_list_no_open): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION. For OMP_CLAUSE_DEPEND, parse clause operands as either an array section, or lvalue assignment expression. (cp_parser_omp_clause_if): Handle cancel and simd modifiers. (cp_parser_omp_clause_defaultmap): Parse new kinds of defaultmap clause. (cp_parser_omp_clause_reduction): Add IS_OMP and KIND arguments. Parse reduction modifiers. Pass KIND to c_parser_omp_variable_list. 
(cp_parser_omp_clause_lastprivate, cp_parser_omp_iterators): New functions. (cp_parser_omp_clause_depend): Parse iterator modifier and handle iterators. Parse mutexinoutset and depobj kinds. (cp_parser_oacc_all_clauses): Adjust cp_parser_omp_clause_reduction callers. (cp_parser_omp_all_clauses): Likewise. Handle PRAGMA_OMP_CLAUSE_NONTEMPORAL and PRAGMA_OMP_CLAUSE_{IN,TASK}_REDUCTION. Call cp_parser_omp_clause_lastprivate for OpenMP lastprivate clause. (cp_parser_omp_atomic): Pass pragma_tok->location as LOC to finish_omp_atomic. Parse hint and memory order clauses. Handle default memory order from requires directive if any. Adjust finish_omp_atomic caller. (cp_parser_omp_critical): Allow comma in between (name) and hint clause. (cp_parser_omp_depobj): New function. (cp_parser_omp_flush): Parse flush with memory-order-clause. (cp_parser_omp_for_cond): Allow NE_EXPR even in OpenMP loops. (cp_convert_omp_range_for, cp_finish_omp_range_for): New functions. (cp_parser_omp_for_loop): Parse C++11 range for loops among omp loops. Handle OMP_CLAUSE_IN_REDUCTION like OMP_CLAUSE_REDUCTION. (OMP_SIMD_CLAUSE_MASK): Add if and nontemporal clauses. (cp_parser_omp_simd, cp_parser_omp_for): Call keep_next_level before begin_omp_structured_block and call finish_omp_for_block on finish_omp_structured_block result. (cp_parser_omp_master): Add p_name, mask and cclauses arguments. Allow to be called while parsing combined parallel master. Parse combined master taskloop{, simd}. (cp_parser_omp_parallel): Parse combined parallel master{, taskloop{, simd}} constructs. (cp_parser_omp_single): Use SET_EXPR_LOCATION. (OMP_TASK_CLAUSE_MASK): Add in_reduction clause. (OMP_TASKWAIT_CLAUSE_MASK): Define. (cp_parser_omp_taskwait): Handle taskwait with depend clauses. (OMP_TASKGROUP_CLAUSE_MASK): Define. (cp_parser_omp_taskgroup): Parse taskgroup clauses, adjust c_finish_omp_taskgroup caller. 
(cp_parser_omp_distribute): Call keep_next_level before begin_omp_structured_block and call finish_omp_for_block on finish_omp_structured_block result. (cp_parser_omp_teams): Force a BIND_EXPR with BLOCK around teams body. (cp_parser_omp_target_data): Allow target data with only use_device_ptr clauses. (cp_parser_omp_target): Set OMP_REQUIRES_TARGET_USED bit in omp_requires_mask. (cp_parser_omp_requires): New function. (OMP_TASKLOOP_CLAUSE_MASK): Add reduction and in_reduction clauses. (cp_parser_omp_taskloop): Add forward declaration. Disallow in_reduction clause when combined with parallel master. Call keep_next_level before begin_omp_structured_block and call finish_omp_for_block on finish_omp_structured_block result. (cp_parser_omp_construct): Adjust cp_parser_omp_master caller. (cp_parser_pragma): Handle PRAGMA_OMP_DEPOBJ and PRAGMA_OMP_REQUIRES. * pt.c (tsubst_omp_clause_decl): Add iterators_cache argument. Adjust recursive calls. Handle iterators. (tsubst_omp_clauses): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION and OMP_CLAUSE_NONTEMPORAL. Adjust tsubst_omp_clause_decl callers. (tsubst_decomp_names): (tsubst_omp_for_iterator): Change orig_declv into a reference. Handle range for loops. Move orig_declv handling after declv/initv handling. (tsubst_expr): Force a BIND_EXPR with BLOCK around teams body. Adjust finish_omp_atomic caller. Call keep_next_level before begin_omp_structured_block. Call cp_finish_omp_range_for for range for loops and use {begin,finish}_omp_structured_block instead of {push,pop}_stmt_list if there are any range for loops. Call finish_omp_for_block on finish_omp_structured_block result. Handle OMP_DEPOBJ. Handle taskwait with depend clauses. For OMP_ATOMIC call tsubst_omp_clauses on clauses if any, adjust finish_omp_atomic caller. Use OMP_ATOMIC_MEMORY_ORDER rather than OMP_ATOMIC_SEQ_CST. Handle clauses on OMP_TASKGROUP. (dependent_omp_for_p): Always return true for range for loops if processing_template_decl. 
Return true if class type iterator does not have INTEGER_CST increment. * semantics.c: Include memmodel.h. (handle_omp_array_sections_1): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION like OMP_CLAUSE_REDUCTION. (handle_omp_array_sections): Likewise. Call save_expr on array reductions before calling build_index_type. Handle depend clauses with iterators. (finish_omp_reduction_clause): Call save_expr for whole array reduction sizes. Don't mark OMP_CLAUSE_DECL addressable if it has reference type. Do mark decl_placeholder addressable if needed. Use error_at with OMP_CLAUSE_LOCATION (c) as first argument instead of error. (cp_omp_finish_iterators): New function. (finish_omp_clauses): Don't diagnose nonmonotonic clause with static, runtime or auto schedule kinds. Diagnose nogroup clause used with reduction clause(s). Handle depend clause with OMP_CLAUSE_DEPEND_DEPOBJ. Diagnose bit-fields. Require omp_depend_t type for OMP_CLAUSE_DEPEND_DEPOBJ kinds and some different type for other kinds. Use cp_build_addr_expr and cp_build_indirect_ref instead of cxx_mark_addressable. Handle depend clauses with iterators. Only handle static data members in the special case that const qualified vars may be specified in firstprivate clause. Complain if const qualified vars without mutable members are mentioned in data-sharing clauses other than firstprivate or shared. Use error_at with OMP_CLAUSE_LOCATION (c) as first argument instead of error. Diagnose more than one nontemporal clause referring to the same variable. Use error_at rather than error for priority and hint clause diagnostics. Fix pasto for hint clause. Diagnose hint expression that doesn't fold into INTEGER_CST. Diagnose if clause with modifier other than cancel. Handle OMP_CLAUSE_{IN,TASK}_REDUCTION like OMP_CLAUSE_REDUCTION. Allow any lvalue as OMP_CLAUSE_DEPEND operand (besides array section), adjust diagnostics. 
(handle_omp_for_class_iterator): Don't create a new TREE_LIST if one has been created already for range for, just fill TREE_PURPOSE and TREE_VALUE. Call cp_fully_fold on incr. (finish_omp_for): Don't check cond/incr if cond is global_namespace. Pass to c_omp_check_loop_iv_exprs orig_declv if non-NULL. Don't use IS_EMPTY_STMT on NULL pre_body. Adjust c_finish_omp_for caller. (finish_omp_for_block): New function. (finish_omp_atomic): Add LOC argument, pass it through to c_finish_omp_atomic and set it as location of OMP_ATOMIC* trees. Remove SEQ_CST argument. Add CLAUSES and MO arguments. Adjust c_finish_omp_atomic caller. Stick clauses if any into first argument of wrapping OMP_ATOMIC. (finish_omp_depobj): New function. (finish_omp_flush): Add MO argument, if not MEMMODEL_LAST, emit __atomic_thread_fence call with the given value. (finish_omp_cancel): Diagnose if clause with modifier other than cancel. gcc/fortran/ * trans-openmp.c (gfc_trans_omp_clauses): Use OMP_CLAUSE_DEFAULTMAP_SET_KIND. (gfc_trans_omp_atomic): Set OMP_ATOMIC_MEMORY_ORDER rather than OMP_ATOMIC_SEQ_CST. (gfc_trans_omp_taskgroup): Build OMP_TASKGROUP using make_node instead of build1_loc. * types.def (BT_FN_VOID_BOOL, BT_FN_VOID_SIZE_SIZE_PTR, BT_FN_UINT_UINT_PTR_PTR, BT_FN_UINT_OMPFN_PTR_UINT_UINT, BT_FN_BOOL_UINT_LONGPTR_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR, BT_FN_BOOL_UINT_ULLPTR_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR, BT_FN_BOOL_LONG_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR): New. (BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR_PTR): Formatting fix. gcc/testsuite/ * c-c++-common/gomp/atomic-17.c: New test. * c-c++-common/gomp/atomic-18.c: New test. * c-c++-common/gomp/atomic-19.c: New test. * c-c++-common/gomp/atomic-20.c: New test. * c-c++-common/gomp/atomic-21.c: New test. * c-c++-common/gomp/atomic-22.c: New test. * c-c++-common/gomp/clauses-1.c (r2): New variable. 
(foo): Add ntm argument and test if and nontemporal clauses on constructs with simd. (bar): Put taskloop simd inside of taskgroup with task_reduction, use in_reduction clause instead of reduction. Add another taskloop simd without nogroup clause, but with reduction clause and a new in_reduction. Add ntm and i3 arguments. Test if and nontemporal clauses on constructs with simd. Change if clauses on some constructs from specific to the particular constituents to one without a modifier. Add new tests for combined host teams and for new parallel master and {,parallel }master taskloop{, simd} combined constructs. (baz): New function with host teams tests. * gcc.dg/gomp/combined-1.c: Moved to ... * c-c++-common/gomp/combined-1.c: ... here. Adjust expected library call. * c-c++-common/gomp/combined-2.c: New test. * c-c++-common/gomp/combined-3.c: New test. * c-c++-common/gomp/critical-1.c: New test. * c-c++-common/gomp/critical-2.c: New test. * c-c++-common/gomp/default-1.c: New test. * c-c++-common/gomp/defaultmap-1.c: New test. * c-c++-common/gomp/defaultmap-2.c: New test. * c-c++-common/gomp/defaultmap-3.c: New test. * c-c++-common/gomp/depend-5.c: New test. * c-c++-common/gomp/depend-6.c: New test. * c-c++-common/gomp/depend-iterator-1.c: New test. * c-c++-common/gomp/depend-iterator-2.c: New test. * c-c++-common/gomp/depobj-1.c: New test. * c-c++-common/gomp/flush-1.c: New test. * c-c++-common/gomp/flush-2.c: New test. * c-c++-common/gomp/for-1.c: New test. * c-c++-common/gomp/for-2.c: New test. * c-c++-common/gomp/for-3.c: New test. * c-c++-common/gomp/for-4.c: New test. * c-c++-common/gomp/for-5.c: New test. * c-c++-common/gomp/for-6.c: New test. * c-c++-common/gomp/for-7.c: New test. * c-c++-common/gomp/if-1.c (foo): Add some further tests. * c-c++-common/gomp/if-2.c (foo): Likewise. Expect slightly different diagnostics wording in one case. * c-c++-common/gomp/if-3.c: New test. * c-c++-common/gomp/master-combined-1.c: New test. 
* c-c++-common/gomp/master-combined-2.c: New test. * c-c++-common/gomp/nontemporal-1.c: New test. * c-c++-common/gomp/nontemporal-2.c: New test. * c-c++-common/gomp/reduction-task-1.c: New test. * c-c++-common/gomp/reduction-task-2.c: New test. * c-c++-common/gomp/requires-1.c: New test. * c-c++-common/gomp/requires-2.c: New test. * c-c++-common/gomp/requires-3.c: New test. * c-c++-common/gomp/requires-4.c: New test. * c-c++-common/gomp/schedule-modifiers-1.c (bar): Don't expect diagnostics for nonmonotonic modifier with static, runtime or auto schedule kinds. * c-c++-common/gomp/simd7.c: New test. * c-c++-common/gomp/target-data-1.c: New test. * c-c++-common/gomp/taskloop-reduction-1.c: New test. * c-c++-common/gomp/taskwait-depend-1.c: New test. * c-c++-common/gomp/teams-1.c: New test. * c-c++-common/gomp/teams-2.c: New test. * gcc.dg/gomp/appendix-a/a.24.1.c: Update from OpenMP examples. Add shared(c) clause. * gcc.dg/gomp/atomic-5.c (f1): Add another expected error. * gcc.dg/gomp/clause-1.c: Adjust expected diagnostics for const qualified vars without mutable member no longer being predetermined shared. * gcc.dg/gomp/sharing-1.c: Likewise. * g++.dg/gomp/clause-3.C: Likewise. * g++.dg/gomp/member-2.C: Likewise. * g++.dg/gomp/predetermined-1.C: Likewise. * g++.dg/gomp/private-1.C: Likewise. * g++.dg/gomp/sharing-1.C: Likewise. * g++.dg/gomp/sharing-2.C: Likewise. Add a few tests with aggregate const static data member without mutable elements. * gcc.dg/gomp/for-4.c: Expected nonmonotonic functions in the dumps. * gcc.dg/gomp/for-5.c: Likewise. * gcc.dg/gomp/for-6.c: Change expected library call. * gcc.dg/gomp/pr39495-2.c (foo): Don't expect errors on !=. * gcc.dg/gomp/reduction-2.c: New test. * gcc.dg/gomp/simd-1.c: New test. * gcc.dg/gomp/teams-1.c: Adjust expected diagnostic lines. * g++.dg/gomp/atomic-18.C: New test. * g++.dg/gomp/atomic-19.C: New test. * g++.dg/gomp/atomic-5.C (f1): Adjust expected lines of read-only variable messages. 
Add another expected error. * g++.dg/gomp/critical-3.C: New test. * g++.dg/gomp/depend-iterator-1.C: New test. * g++.dg/gomp/depend-iterator-2.C: New test. * g++.dg/gomp/depobj-1.C: New test. * g++.dg/gomp/doacross-1.C: New test. * g++.dg/gomp/for-21.C: New test. * g++.dg/gomp/for-4.C: Expected nonmonotonic functions in the dumps. * g++.dg/gomp/for-5.C: Likewise. * g++.dg/gomp/for-6.C: Change expected library call. * g++.dg/gomp/loop-4.C: New test. * g++.dg/gomp/pr33372-1.C: Adjust location of the expected diagnostics. * g++.dg/gomp/pr33372-3.C: Likewise. * g++.dg/gomp/pr39495-2.C (foo): Don't expect errors on !=. * g++.dg/gomp/simd-2.C: New test. * g++.dg/gomp/tpl-atomic-2.C: Adjust expected diagnostic lines. include/ * gomp-constants.h (GOMP_TASK_FLAG_REDUCTION, GOMP_DEPEND_IN, GOMP_DEPEND_OUT, GOMP_DEPEND_INOUT, GOMP_DEPEND_MUTEXINOUTSET): Define. libgomp/ * affinity.c (gomp_display_affinity_place): New function. * affinity-fmt.c: New file. * alloc.c (gomp_aligned_alloc, gomp_aligned_free): New functions. * config/linux/affinity.c (gomp_display_affinity_place): New function. * config/nvptx/icv-device.c (omp_get_num_teams, omp_get_team_num): Move these functions to ... * config/nvptx/teams.c: ... here. New file. * config/nvptx/target.c (omp_pause_resource, omp_pause_resource_all): New functions. * config/nvptx/team.c (gomp_team_start, gomp_pause_host): New functions. * configure.ac: Check for aligned_alloc, posix_memalign, memalign and _aligned_malloc. (HAVE_UNAME, HAVE_GETHOSTNAME, HAVE_GETPID): Add new tests. * configure.tgt: Add -DUSING_INITIAL_EXEC_TLS to XCFLAGS for Linux. * env.c (gomp_display_affinity_var, gomp_affinity_format_var, gomp_affinity_format_len): New variables. (parse_schedule): Parse monotonic and nonmonotonic modifiers in OMP_SCHEDULE variable. Set GFS_MONOTONIC for monotonic schedules. (handle_omp_display_env): Display monotonic/nonmonotonic schedule modifiers. Display (non-default) chunk sizes. 
Print OMP_DISPLAY_AFFINITY and OMP_AFFINITY_FORMAT. (initialize_env): Don't call pthread_attr_setdetachstate. Handle OMP_DISPLAY_AFFINITY and OMP_AFFINITY_FORMAT env vars. * fortran.c: Include stdio.h and string.h. (omp_pause_resource, omp_pause_resource_all): Add ialias_redirect. (omp_get_schedule_, omp_get_schedule_8_): Mask off GFS_MONOTONIC bit. (omp_set_affinity_format_, omp_get_affinity_format_, omp_display_affinity_, omp_capture_affinity_, omp_pause_resource_, omp_pause_resource_all_): New functions. * icv.c (omp_set_schedule): Mask off omp_sched_monotonic bit in switch. * icv-device.c (omp_get_num_teams, omp_get_team_num): Move these functions to ... * teams.c: ... here. New file. * libgomp_g.h: Include gstdint.h. (GOMP_loop_nonmonotonic_runtime_start, GOMP_loop_maybe_nonmonotonic_runtime_start, GOMP_loop_start, GOMP_loop_ordered_start, GOMP_loop_nonmonotonic_runtime_next, GOMP_loop_maybe_nonmonotonic_runtime_next, GOMP_loop_doacross_start, GOMP_parallel_loop_nonmonotonic_runtime, GOMP_parallel_loop_maybe_nonmonotonic_runtime, GOMP_loop_ull_nonmonotonic_runtime_start, GOMP_loop_ull_maybe_nonmonotonic_runtime_start, GOMP_loop_ull_start, GOMP_loop_ull_ordered_start, GOMP_loop_ull_nonmonotonic_runtime_next, GOMP_loop_ull_maybe_nonmonotonic_runtime_next, GOMP_loop_ull_doacross_start, GOMP_parallel_reductions, GOMP_taskwait_depend, GOMP_taskgroup_reduction_register, GOMP_taskgroup_reduction_unregister, GOMP_task_reduction_remap, GOMP_workshare_task_reduction_unregister, GOMP_sections2_start, GOMP_teams_reg): Declare. * libgomp.h (GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC): Define unless gomp_aligned_alloc uses fallback implementation. (gomp_aligned_alloc, gomp_aligned_free): Declare. (enum gomp_schedule_type): Add GFS_MONOTONIC. (struct gomp_doacross_work_share): Add extra field. (struct gomp_work_share): Add task_reductions field. (struct gomp_taskgroup): Add workshare and reductions fields. (GOMP_NEEDS_THREAD_HANDLE): Define if needed. 
(gomp_thread_handle): New typedef. (gomp_display_affinity_place, gomp_set_affinity_format, gomp_display_string, gomp_display_affinity, gomp_display_affinity_thread): Declare. (gomp_doacross_init, gomp_doacross_ull_init): Add size_t argument. (gomp_parallel_reduction_register, gomp_workshare_taskgroup_start, gomp_workshare_task_reduction_register): Declare. (gomp_team_start): Add taskgroup argument. (gomp_pause_host): Declare. (gomp_init_work_share, gomp_work_share_start): Change bool argument to size_t. (gomp_thread_self, gomp_thread_to_pthread_t): New inline functions. * libgomp.map (GOMP_5.0): Export GOMP_loop_start, GOMP_loop_ordered_start, GOMP_loop_doacross_start, GOMP_loop_ull_start, GOMP_loop_ull_ordered_start, GOMP_loop_ull_doacross_start, GOMP_workshare_task_reduction_unregister, GOMP_sections2_start, GOMP_loop_maybe_nonmonotonic_runtime_next, GOMP_loop_maybe_nonmonotonic_runtime_start, GOMP_loop_nonmonotonic_runtime_next, GOMP_loop_nonmonotonic_runtime_start, GOMP_loop_ull_maybe_nonmonotonic_runtime_next, GOMP_loop_ull_maybe_nonmonotonic_runtime_start, GOMP_loop_ull_nonmonotonic_runtime_next, GOMP_loop_ull_nonmonotonic_runtime_start, GOMP_parallel_loop_maybe_nonmonotonic_runtime, GOMP_parallel_loop_nonmonotonic_runtime, GOMP_parallel_reductions, GOMP_taskgroup_reduction_register, GOMP_taskgroup_reduction_unregister, GOMP_task_reduction_remap, GOMP_teams_reg and GOMP_taskwait_depend. (OMP_5.0): Export omp_pause_resource{,_all}{,_}, omp_{capture,display}_affinity{,_}, and omp_[gs]et_affinity_format{,_}. * loop.c: Include string.h. (GOMP_loop_runtime_next): Add ialias. (GOMP_taskgroup_reduction_register): Add ialias_redirect. (gomp_loop_static_start, gomp_loop_dynamic_start, gomp_loop_guided_start, gomp_loop_ordered_static_start, gomp_loop_ordered_dynamic_start, gomp_loop_ordered_guided_start, gomp_loop_doacross_static_start, gomp_loop_doacross_dynamic_start, gomp_loop_doacross_guided_start): Adjust gomp_work_share_start or gomp_doacross_init callers. 
(gomp_adjust_sched, GOMP_loop_start, GOMP_loop_ordered_start, GOMP_loop_doacross_start): New functions. (GOMP_loop_runtime_start, GOMP_loop_ordered_runtime_start, GOMP_loop_doacross_runtime_start, GOMP_parallel_loop_runtime_start): Mask off GFS_MONOTONIC bit. (GOMP_loop_maybe_nonmonotonic_runtime_next, GOMP_loop_maybe_nonmonotonic_runtime_start, GOMP_loop_nonmonotonic_runtime_next, GOMP_loop_nonmonotonic_runtime_start, GOMP_parallel_loop_maybe_nonmonotonic_runtime, GOMP_parallel_loop_nonmonotonic_runtime): New aliases or wrapper functions. (gomp_parallel_loop_start): Pass NULL as taskgroup to gomp_team_start. * loop_ull.c: Include string.h. (GOMP_loop_ull_runtime_next): Add ialias. (GOMP_taskgroup_reduction_register): Add ialias_redirect. (gomp_loop_ull_static_start, gomp_loop_ull_dynamic_start, gomp_loop_ull_guided_start, gomp_loop_ull_ordered_static_start, gomp_loop_ull_ordered_dynamic_start, gomp_loop_ull_ordered_guided_start, gomp_loop_ull_doacross_static_start, gomp_loop_ull_doacross_dynamic_start, gomp_loop_ull_doacross_guided_start): Adjust gomp_work_share_start and gomp_doacross_ull_init callers. (gomp_adjust_sched, GOMP_loop_ull_start, GOMP_loop_ull_ordered_start, GOMP_loop_ull_doacross_start): New functions. (GOMP_loop_ull_runtime_start, GOMP_loop_ull_ordered_runtime_start, GOMP_loop_ull_doacross_runtime_start): Mask off GFS_MONOTONIC bit. (GOMP_loop_ull_maybe_nonmonotonic_runtime_next, GOMP_loop_ull_maybe_nonmonotonic_runtime_start, GOMP_loop_ull_nonmonotonic_runtime_next, GOMP_loop_ull_nonmonotonic_runtime_start): Likewise. * Makefile.am (libgomp_la_SOURCES): Add teams.c and affinity-fmt.c. * omp.h.in (enum omp_sched_t): Add omp_sched_monotonic. (omp_pause_resource_t, omp_depend_t): New typedefs. (enum omp_lock_hint_t): Renamed to ... (enum omp_sync_hint_t): ... this. Define omp_sync_hint_* enumerators using numbers and omp_lock_hint_* as their aliases. (omp_lock_hint_t): New typedef. Rename to ... (omp_sync_hint_t): ... this. 
(omp_init_lock_with_hint, omp_init_nest_lock_with_hint): Use omp_sync_hint_t instead of omp_lock_hint_t. (omp_pause_resource, omp_pause_resource_all, omp_set_affinity_format, omp_get_affinity_format, omp_display_affinity, omp_capture_affinity): Declare. (omp_target_is_present, omp_target_disassociate_ptr): Change first argument from void * to const void *. (omp_target_memcpy, omp_target_memcpy_rect): Change second argument from void * to const void *. (omp_target_associate_ptr): Change first and second arguments from void * to const void *. * omp_lib.f90.in (omp_pause_resource_kind, omp_pause_soft, omp_pause_hard): New parameters. (omp_pause_resource, omp_pause_resource_all, omp_set_affinity_format, omp_get_affinity_format, omp_display_affinity, omp_capture_affinity): New interfaces. * omp_lib.h.in (omp_pause_resource_kind, omp_pause_soft, omp_pause_hard): New parameters. (omp_pause_resource, omp_pause_resource_all, omp_set_affinity_format, omp_get_affinity_format, omp_display_affinity, omp_capture_affinity): New externals. * ordered.c (gomp_doacross_init, gomp_doacross_ull_init): Add EXTRA argument. If not needed to prepare array, if extra is 0, clear ws->doacross, otherwise allocate just doacross structure and extra payload. If array is needed, allocate also extra payload. (GOMP_doacross_post, GOMP_doacross_wait, GOMP_doacross_ull_post, GOMP_doacross_ull_wait): Handle doacross->array == NULL like doacross == NULL. * parallel.c (GOMP_parallel_start): Pass NULL as taskgroup to gomp_team_start. (GOMP_parallel): Likewise. Formatting fix. (GOMP_parallel_reductions): New function. (GOMP_cancellation_point): If taskgroup has workshare flag set, check cancelled of prev taskgroup if any. (GOMP_cancel): If taskgroup has workshare flag set, set cancelled on prev taskgroup if any. * sections.c: Include string.h. (GOMP_taskgroup_reduction_register): Add ialias_redirect. (GOMP_sections_start): Adjust gomp_work_share_start caller. (GOMP_sections2_start): New function. 
(GOMP_parallel_sections_start, GOMP_parallel_sections): Pass NULL as taskgroup to gomp_team_start. * single.c (GOMP_single_start, GOMP_single_copy_start): Adjust gomp_work_share_start callers. * target.c (GOMP_target_update_ext, GOMP_target_enter_exit_data): If taskgroup has workshare flag set, check cancelled on prev taskgroup if any. Guard all cancellation tests with gomp_cancel_var test. (omp_target_is_present, omp_target_disassociate_ptr): Change ptr argument from void * to const void *. (omp_target_memcpy): Change src argument from void * to const void *. (omp_target_memcpy_rect): Likewise. (omp_target_memcpy_rect_worker): Likewise. Use const char * casts instead of char * where needed. (omp_target_associate_ptr): Change host_ptr and device_ptr arguments from void * to const void *. (omp_pause_resource, omp_pause_resource_all): New functions. * task.c (gomp_task_handle_depend): Handle new depend array format in addition to the old. Handle mutexinoutset kinds the same as inout for now, handle unspecified kinds. (gomp_create_target_task): If taskgroup has workshare flag set, check cancelled on prev taskgroup if any. Guard all cancellation tests with gomp_cancel_var test. Handle new depend array format count in addition to the old. (GOMP_task): Likewise. Adjust function comment. (gomp_task_run_pre): If taskgroup has workshare flag set, check cancelled on prev taskgroup if any. Guard all cancellation tests with gomp_cancel_var test. (GOMP_taskwait_depend): New function. (gomp_task_maybe_wait_for_dependencies): Handle new depend array format in addition to the old. Handle mutexinoutset kinds the same as inout for now, handle unspecified kinds. Fix a function comment typo. (gomp_taskgroup_init): New function. (GOMP_taskgroup_start): Use it. 
(gomp_reduction_register, gomp_create_artificial_team, GOMP_taskgroup_reduction_register, GOMP_taskgroup_reduction_unregister, GOMP_task_reduction_remap, gomp_parallel_reduction_register, gomp_workshare_task_reduction_register, gomp_workshare_taskgroup_start, GOMP_workshare_task_reduction_unregister): New functions. * taskloop.c (GOMP_taskloop): If taskgroup has workshare flag set, check cancelled on prev taskgroup if any. Guard all cancellation tests with gomp_cancel_var test. Handle GOMP_TASK_FLAG_REDUCTION flag by calling GOMP_taskgroup_reduction_register. * team.c (gomp_thread_attr): Remove comment. (struct gomp_thread_start_data): Add handle field. (gomp_thread_start): Call pthread_detach. (gomp_new_team): Adjust gomp_init_work_share caller. (gomp_free_pool_helper): Call pthread_detach. (gomp_team_start): Add taskgroup argument, initialize implicit tasks' taskgroup field to that. Don't call pthread_attr_setdetachstate. Handle OMP_DISPLAY_AFFINITY env var. (gomp_team_end): Determine nesting by thr->ts.level != 0 rather than thr->ts.team != NULL. (gomp_pause_pool_helper, gomp_pause_host): New functions. * work.c (alloc_work_share): Use gomp_aligned_alloc instead of gomp_malloc if GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC is defined. (gomp_init_work_share): Change ORDERED argument from bool to size_t, if more than 1 allocate also extra payload at the end of array. Never keep ordered_team_ids NULL, set it to inline_ordered_team_ids instead. (gomp_work_share_start): Change ORDERED argument from bool to size_t, return true instead of ws. * Makefile.in: Regenerated. * configure: Regenerated. * config.h.in: Regenerated. * testsuite/libgomp.c/cancel-for-2.c (foo): Use cancel modifier in some cases. * testsuite/libgomp.c-c++-common/cancel-parallel-1.c: New test. * testsuite/libgomp.c-c++-common/cancel-taskgroup-3.c: New test. * testsuite/libgomp.c-c++-common/depend-iterator-1.c: New test. * testsuite/libgomp.c-c++-common/depend-iterator-2.c: New test. 
* testsuite/libgomp.c-c++-common/depend-mutexinout-1.c: New test. * testsuite/libgomp.c-c++-common/depend-mutexinout-2.c: New test. * testsuite/libgomp.c-c++-common/depobj-1.c: New test. * testsuite/libgomp.c-c++-common/display-affinity-1.c: New test. * testsuite/libgomp.c-c++-common/for-10.c: New test. * testsuite/libgomp.c-c++-common/for-11.c: New test. * testsuite/libgomp.c-c++-common/for-12.c: New test. * testsuite/libgomp.c-c++-common/for-13.c: New test. * testsuite/libgomp.c-c++-common/for-14.c: New test. * testsuite/libgomp.c-c++-common/for-15.c: New test. * testsuite/libgomp.c-c++-common/for-2.h: If CONDNE macro is defined, define a different N(test), don't define N(f0) to N(f14), but instead define N(f20) to N(f34) using != comparisons. * testsuite/libgomp.c-c++-common/for-7.c: New test. * testsuite/libgomp.c-c++-common/for-8.c: New test. * testsuite/libgomp.c-c++-common/for-9.c: New test. * testsuite/libgomp.c-c++-common/master-combined-1.c: New test. * testsuite/libgomp.c-c++-common/pause-1.c: New test. * testsuite/libgomp.c-c++-common/pause-2.c: New test. * testsuite/libgomp.c-c++-common/pr66199-10.c: New test. * testsuite/libgomp.c-c++-common/pr66199-11.c: New test. * testsuite/libgomp.c-c++-common/pr66199-12.c: New test. * testsuite/libgomp.c-c++-common/pr66199-13.c: New test. * testsuite/libgomp.c-c++-common/pr66199-14.c: New test. * testsuite/libgomp.c-c++-common/simd-1.c: New test. * testsuite/libgomp.c-c++-common/taskloop-reduction-1.c: New test. * testsuite/libgomp.c-c++-common/taskloop-reduction-2.c: New test. * testsuite/libgomp.c-c++-common/taskloop-reduction-3.c: New test. * testsuite/libgomp.c-c++-common/taskloop-reduction-4.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-11.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-12.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-1.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-2.c: New test. 
* testsuite/libgomp.c-c++-common/task-reduction-3.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-4.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-5.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-6.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-7.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-8.c: New test. * testsuite/libgomp.c-c++-common/task-reduction-9.c: New test. * testsuite/libgomp.c-c++-common/taskwait-depend-1.c: New test. * testsuite/libgomp.c++/depend-1.C: New test. * testsuite/libgomp.c++/depend-iterator-1.C: New test. * testsuite/libgomp.c++/depobj-1.C: New test. * testsuite/libgomp.c++/for-16.C: New test. * testsuite/libgomp.c++/for-21.C: New test. * testsuite/libgomp.c++/for-22.C: New test. * testsuite/libgomp.c++/for-23.C: New test. * testsuite/libgomp.c++/for-24.C: New test. * testsuite/libgomp.c++/for-25.C: New test. * testsuite/libgomp.c++/for-26.C: New test. * testsuite/libgomp.c++/taskloop-reduction-1.C: New test. * testsuite/libgomp.c++/taskloop-reduction-2.C: New test. * testsuite/libgomp.c++/taskloop-reduction-3.C: New test. * testsuite/libgomp.c++/taskloop-reduction-4.C: New test. * testsuite/libgomp.c++/task-reduction-10.C: New test. * testsuite/libgomp.c++/task-reduction-11.C: New test. * testsuite/libgomp.c++/task-reduction-12.C: New test. * testsuite/libgomp.c++/task-reduction-13.C: New test. * testsuite/libgomp.c++/task-reduction-14.C: New test. * testsuite/libgomp.c++/task-reduction-15.C: New test. * testsuite/libgomp.c++/task-reduction-16.C: New test. * testsuite/libgomp.c++/task-reduction-17.C: New test. * testsuite/libgomp.c++/task-reduction-18.C: New test. * testsuite/libgomp.c++/task-reduction-19.C: New test. * testsuite/libgomp.c/task-reduction-1.c: New test. * testsuite/libgomp.c++/task-reduction-1.C: New test. * testsuite/libgomp.c/task-reduction-2.c: New test. * testsuite/libgomp.c++/task-reduction-2.C: New test. 
* testsuite/libgomp.c++/task-reduction-3.C: New test. * testsuite/libgomp.c++/task-reduction-4.C: New test. * testsuite/libgomp.c++/task-reduction-5.C: New test. * testsuite/libgomp.c++/task-reduction-6.C: New test. * testsuite/libgomp.c++/task-reduction-7.C: New test. * testsuite/libgomp.c++/task-reduction-8.C: New test. * testsuite/libgomp.c++/task-reduction-9.C: New test. * testsuite/libgomp.c/teams-1.c: New test. * testsuite/libgomp.c/teams-2.c: New test. * testsuite/libgomp.c/thread-limit-4.c: New test. * testsuite/libgomp.c/thread-limit-5.c: New test. * testsuite/libgomp.fortran/display-affinity-1.f90: New test. From-SVN: r265930
Diffstat (limited to 'gcc/omp-low.c')
-rw-r--r--gcc/omp-low.c1878
1 files changed, 1719 insertions, 159 deletions
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index b06ddb3..ca78d7a 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -114,6 +114,15 @@ struct omp_context
otherwise. */
gimple *simt_stmt;
+ /* For task reductions registered in this context, a vector containing
+ the length of the private copies block (if constant, otherwise NULL)
+ and then offsets (if constant, otherwise NULL) for each entry. */
+ vec<tree> task_reductions;
+
+ /* And a hash map from the reduction clauses to the registered array
+ elts. */
+ hash_map<tree, unsigned> *task_reduction_map;
+
/* Nesting depth of this context. Used to beautify error messages re
invalid gotos. The outermost ctx is depth 1, with depth 0 being
reserved for the main body of the function. */
@@ -280,12 +289,23 @@ is_taskloop_ctx (omp_context *ctx)
}
-/* Return true if CTX is for an omp parallel or omp task. */
+/* Return true if CTX is for a host omp teams. */
+
+static inline bool
+is_host_teams_ctx (omp_context *ctx)
+{
+ return gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
+ && gimple_omp_teams_host (as_a <gomp_teams *> (ctx->stmt));
+}
+
+/* Return true if CTX is for an omp parallel or omp task or host omp teams
+ (the last one is strictly not a task region in OpenMP speak, but we
+ need to treat it similarly). */
static inline bool
is_taskreg_ctx (omp_context *ctx)
{
- return is_parallel_ctx (ctx) || is_task_ctx (ctx);
+ return is_parallel_ctx (ctx) || is_task_ctx (ctx) || is_host_teams_ctx (ctx);
}
/* Return true if EXPR is variable sized. */
@@ -371,7 +391,7 @@ use_pointer_for_field (tree decl, omp_context *shared_ctx)
be passing an address in this case? Should we simply assert
this to be false, or should we have a cleanup pass that removes
these from the list of mappings? */
- if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
+ if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, shared_ctx)))
return true;
/* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
@@ -522,6 +542,9 @@ build_outer_var_ref (tree var, omp_context *ctx,
enum omp_clause_code code = OMP_CLAUSE_ERROR)
{
tree x;
+ omp_context *outer = ctx->outer;
+ while (outer && gimple_code (outer->stmt) == GIMPLE_OMP_TASKGROUP)
+ outer = outer->outer;
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
x = var;
@@ -548,44 +571,43 @@ build_outer_var_ref (tree var, omp_context *ctx,
Similarly for OMP_CLAUSE_PRIVATE with outer ref, that can refer
to private vars in all worksharing constructs. */
x = NULL_TREE;
- if (ctx->outer && is_taskreg_ctx (ctx))
- x = lookup_decl (var, ctx->outer);
- else if (ctx->outer)
+ if (outer && is_taskreg_ctx (outer))
+ x = lookup_decl (var, outer);
+ else if (outer)
x = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (x == NULL_TREE)
x = var;
}
else if (code == OMP_CLAUSE_LASTPRIVATE && is_taskloop_ctx (ctx))
{
- gcc_assert (ctx->outer);
+ gcc_assert (outer);
splay_tree_node n
- = splay_tree_lookup (ctx->outer->field_map,
+ = splay_tree_lookup (outer->field_map,
(splay_tree_key) &DECL_UID (var));
if (n == NULL)
{
- if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx->outer)))
+ if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, outer)))
x = var;
else
- x = lookup_decl (var, ctx->outer);
+ x = lookup_decl (var, outer);
}
else
{
tree field = (tree) n->value;
/* If the receiver record type was remapped in the child function,
remap the field into the new record type. */
- x = maybe_lookup_field (field, ctx->outer);
+ x = maybe_lookup_field (field, outer);
if (x != NULL)
field = x;
- x = build_simple_mem_ref (ctx->outer->receiver_decl);
+ x = build_simple_mem_ref (outer->receiver_decl);
x = omp_build_component_ref (x, field);
- if (use_pointer_for_field (var, ctx->outer))
+ if (use_pointer_for_field (var, outer))
x = build_simple_mem_ref (x);
}
}
- else if (ctx->outer)
+ else if (outer)
{
- omp_context *outer = ctx->outer;
if (gimple_code (outer->stmt) == GIMPLE_OMP_GRID_BODY)
{
outer = outer->outer;
@@ -925,6 +947,12 @@ delete_omp_context (splay_tree_value value)
if (is_task_ctx (ctx))
finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));
+ if (ctx->task_reduction_map)
+ {
+ ctx->task_reductions.release ();
+ delete ctx->task_reduction_map;
+ }
+
XDELETE (ctx);
}
@@ -1011,8 +1039,10 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
- /* Ignore shared directives in teams construct. */
- if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ /* Ignore shared directives in teams construct inside of
+ target construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
+ && !is_host_teams_ctx (ctx))
{
/* Global variables don't need to be copied,
the receiver side will use them directly. */
@@ -1050,9 +1080,9 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
goto do_private;
case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
- if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
- && TREE_CODE (decl) == MEM_REF)
+ if (TREE_CODE (decl) == MEM_REF)
{
tree t = TREE_OPERAND (decl, 0);
if (TREE_CODE (t) == POINTER_PLUS_EXPR)
@@ -1062,14 +1092,52 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
t = TREE_OPERAND (t, 0);
install_var_local (t, ctx);
if (is_taskreg_ctx (ctx)
- && !is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
- && !is_variable_sized (t))
+ && (!is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
+ || (is_task_ctx (ctx)
+ && (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
+ || (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_TYPE (t)))
+ == POINTER_TYPE)))))
+ && !is_variable_sized (t)
+ && (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
+ || (!OMP_CLAUSE_REDUCTION_TASK (c)
+ && !is_task_ctx (ctx))))
{
- by_ref = use_pointer_for_field (t, ctx);
- install_var_field (t, by_ref, 3, ctx);
+ by_ref = use_pointer_for_field (t, NULL);
+ if (is_task_ctx (ctx)
+ && TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (t))) == POINTER_TYPE)
+ {
+ install_var_field (t, false, 1, ctx);
+ install_var_field (t, by_ref, 2, ctx);
+ }
+ else
+ install_var_field (t, by_ref, 3, ctx);
}
break;
}
+ if (is_task_ctx (ctx)
+ || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ && OMP_CLAUSE_REDUCTION_TASK (c)
+ && is_parallel_ctx (ctx)))
+ {
+ /* Global variables don't need to be copied,
+ the receiver side will use them directly. */
+ if (!is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
+ {
+ by_ref = use_pointer_for_field (decl, ctx);
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION)
+ install_var_field (decl, by_ref, 3, ctx);
+ }
+ install_var_local (decl, ctx);
+ break;
+ }
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ && OMP_CLAUSE_REDUCTION_TASK (c))
+ {
+ install_var_local (decl, ctx);
+ break;
+ }
goto do_private;
case OMP_CLAUSE_LASTPRIVATE:
@@ -1142,6 +1210,7 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
goto do_private;
case OMP_CLAUSE__LOOPTEMP_:
+ case OMP_CLAUSE__REDUCTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
install_var_field (decl, false, 3, ctx);
@@ -1323,8 +1392,10 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__SIMT_:
case OMP_CLAUSE_DEFAULT:
+ case OMP_CLAUSE_NONTEMPORAL:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
+ case OMP_CLAUSE_TASK_REDUCTION:
break;
case OMP_CLAUSE_ALIGNED:
@@ -1382,6 +1453,7 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
break;
case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) != MEM_REF)
{
@@ -1393,9 +1465,16 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
scan_array_reductions = true;
break;
+ case OMP_CLAUSE_TASK_REDUCTION:
+ if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
+ scan_array_reductions = true;
+ break;
+
case OMP_CLAUSE_SHARED:
- /* Ignore shared directives in teams construct. */
- if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ /* Ignore shared directives in teams construct inside of
+ target construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
+ && !is_host_teams_ctx (ctx))
break;
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
@@ -1472,6 +1551,7 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE__LOOPTEMP_:
+ case OMP_CLAUSE__REDUCTEMP_:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_PRIORITY:
@@ -1482,6 +1562,7 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_USE_DEVICE_PTR:
+ case OMP_CLAUSE_NONTEMPORAL:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_NUM_GANGS:
@@ -1511,7 +1592,9 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
if (scan_array_reductions)
{
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
- if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
@@ -1700,7 +1783,7 @@ omp_find_combined_for (gimple_stmt_iterator *gsi_p,
return NULL;
}
-/* Add _LOOPTEMP_ clauses on OpenMP parallel or task. */
+/* Add _LOOPTEMP_/_REDUCTEMP_ clauses on OpenMP parallel or task. */
static void
add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
@@ -1747,6 +1830,18 @@ add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
gimple_omp_taskreg_set_clauses (stmt, c);
}
}
+ if (msk == GF_OMP_FOR_KIND_TASKLOOP
+ && omp_find_clause (gimple_omp_task_clauses (stmt),
+ OMP_CLAUSE_REDUCTION))
+ {
+ tree type = build_pointer_type (pointer_sized_int_node);
+ tree temp = create_tmp_var (type);
+ tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
+ insert_decl_map (&outer_ctx->cb, temp, temp);
+ OMP_CLAUSE_DECL (c) = temp;
+ OMP_CLAUSE_CHAIN (c) = gimple_omp_task_clauses (stmt);
+ gimple_omp_task_set_clauses (stmt, c);
+ }
}
/* Scan an OpenMP parallel directive. */
@@ -1771,6 +1866,23 @@ scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
if (gimple_omp_parallel_combined_p (stmt))
add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx);
+ for (tree c = omp_find_clause (gimple_omp_parallel_clauses (stmt),
+ OMP_CLAUSE_REDUCTION);
+ c; c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE_REDUCTION))
+ if (OMP_CLAUSE_REDUCTION_TASK (c))
+ {
+ tree type = build_pointer_type (pointer_sized_int_node);
+ tree temp = create_tmp_var (type);
+ tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
+ if (outer_ctx)
+ insert_decl_map (&outer_ctx->cb, temp, temp);
+ OMP_CLAUSE_DECL (c) = temp;
+ OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
+ gimple_omp_parallel_set_clauses (stmt, c);
+ break;
+ }
+ else if (OMP_CLAUSE_CHAIN (c) == NULL_TREE)
+ break;
ctx = new_omp_context (stmt, outer_ctx);
taskreg_contexts.safe_push (ctx);
@@ -1810,6 +1922,7 @@ scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
/* Ignore task directives with empty bodies, unless they have depend
clause. */
if (optimize > 0
+ && gimple_omp_body (stmt)
&& empty_body_p (gimple_omp_body (stmt))
&& !omp_find_clause (gimple_omp_task_clauses (stmt), OMP_CLAUSE_DEPEND))
{
@@ -1821,6 +1934,13 @@ scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx);
ctx = new_omp_context (stmt, outer_ctx);
+
+ if (gimple_omp_task_taskwait_p (stmt))
+ {
+ scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
+ return;
+ }
+
taskreg_contexts.safe_push (ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
@@ -1897,7 +2017,7 @@ finish_taskreg_scan (omp_context *ctx)
return;
/* If any task_shared_vars were needed, verify all
- OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
+ OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK,TEAMS}
statements if use_pointer_for_field hasn't changed
because of that. If it did, update field types now. */
if (task_shared_vars)
@@ -1943,6 +2063,30 @@ finish_taskreg_scan (omp_context *ctx)
if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
{
+ tree clauses = gimple_omp_parallel_clauses (ctx->stmt);
+ tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
+ if (c)
+ {
+ /* Move the _reductemp_ clause first. GOMP_parallel_reductions
+ expects to find it at the start of data. */
+ tree f = lookup_field (OMP_CLAUSE_DECL (c), ctx);
+ tree *p = &TYPE_FIELDS (ctx->record_type);
+ while (*p)
+ if (*p == f)
+ {
+ *p = DECL_CHAIN (*p);
+ break;
+ }
+ else
+ p = &DECL_CHAIN (*p);
+ DECL_CHAIN (f) = TYPE_FIELDS (ctx->record_type);
+ TYPE_FIELDS (ctx->record_type) = f;
+ }
+ layout_type (ctx->record_type);
+ fixup_child_record_type (ctx);
+ }
+ else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ {
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
}
@@ -1969,33 +2113,50 @@ finish_taskreg_scan (omp_context *ctx)
/* Move fields corresponding to first and second _looptemp_
 clause first. These are filled by GOMP_taskloop
and thus need to be in specific positions. */
- tree c1 = gimple_omp_task_clauses (ctx->stmt);
- c1 = omp_find_clause (c1, OMP_CLAUSE__LOOPTEMP_);
+ tree clauses = gimple_omp_task_clauses (ctx->stmt);
+ tree c1 = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
tree c2 = omp_find_clause (OMP_CLAUSE_CHAIN (c1),
OMP_CLAUSE__LOOPTEMP_);
+ tree c3 = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx);
tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx);
+ tree f3 = c3 ? lookup_field (OMP_CLAUSE_DECL (c3), ctx) : NULL_TREE;
p = &TYPE_FIELDS (ctx->record_type);
while (*p)
- if (*p == f1 || *p == f2)
+ if (*p == f1 || *p == f2 || *p == f3)
*p = DECL_CHAIN (*p);
else
p = &DECL_CHAIN (*p);
DECL_CHAIN (f1) = f2;
- DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
+ if (c3)
+ {
+ DECL_CHAIN (f2) = f3;
+ DECL_CHAIN (f3) = TYPE_FIELDS (ctx->record_type);
+ }
+ else
+ DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
TYPE_FIELDS (ctx->record_type) = f1;
if (ctx->srecord_type)
{
f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx);
f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx);
+ if (c3)
+ f3 = lookup_sfield (OMP_CLAUSE_DECL (c3), ctx);
p = &TYPE_FIELDS (ctx->srecord_type);
while (*p)
- if (*p == f1 || *p == f2)
+ if (*p == f1 || *p == f2 || *p == f3)
*p = DECL_CHAIN (*p);
else
p = &DECL_CHAIN (*p);
DECL_CHAIN (f1) = f2;
DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
+ if (c3)
+ {
+ DECL_CHAIN (f2) = f3;
+ DECL_CHAIN (f3) = TYPE_FIELDS (ctx->srecord_type);
+ }
+ else
+ DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
TYPE_FIELDS (ctx->srecord_type) = f1;
}
}
@@ -2154,7 +2315,7 @@ scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
if (tgt && is_oacc_kernels (tgt))
{
- /* Strip out reductions, as they are not handled yet. */
+ /* Strip out reductions, as they are not handled yet. */
tree *prev_ptr = &clauses;
while (tree probe = *prev_ptr)
@@ -2321,8 +2482,32 @@ static void
scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
{
omp_context *ctx = new_omp_context (stmt, outer_ctx);
+
+ if (!gimple_omp_teams_host (stmt))
+ {
+ scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
+ scan_omp (gimple_omp_body_ptr (stmt), ctx);
+ return;
+ }
+ taskreg_contexts.safe_push (ctx);
+ gcc_assert (taskreg_nesting_level == 1);
+ ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
+ ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
+ tree name = create_tmp_var_name (".omp_data_s");
+ name = build_decl (gimple_location (stmt),
+ TYPE_DECL, name, ctx->record_type);
+ DECL_ARTIFICIAL (name) = 1;
+ DECL_NAMELESS (name) = 1;
+ TYPE_NAME (ctx->record_type) = name;
+ TYPE_ARTIFICIAL (ctx->record_type) = 1;
+ create_omp_child_function (ctx, false);
+ gimple_omp_teams_set_child_fn (stmt, ctx->cb.dst_fn);
+
scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
+
+ if (TYPE_FIELDS (ctx->record_type) == NULL)
+ ctx->record_type = ctx->receiver_decl = NULL;
}
/* Check nesting restrictions. */
@@ -2388,9 +2573,13 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
return true;
}
}
+ else if (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
+ || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE)
+ return true;
error_at (gimple_location (stmt),
"OpenMP constructs other than %<#pragma omp ordered simd%>"
- " may not be nested inside %<simd%> region");
+ " or %<#pragma omp atomic%> may not be nested inside"
+ " %<simd%> region");
return false;
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
@@ -2814,13 +3003,20 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
}
break;
case GIMPLE_OMP_TEAMS:
- if (ctx == NULL
- || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
- || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
+ if (ctx == NULL)
+ break;
+ else if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
+ || (gimple_omp_target_kind (ctx->stmt)
+ != GF_OMP_TARGET_KIND_REGION))
{
+ /* Teams construct can appear either strictly nested inside of
+ target construct with no intervening stmts, or can be encountered
+ only by initial task (so must not appear inside any OpenMP
 + construct). */
error_at (gimple_location (stmt),
- "%<teams%> construct not closely nested inside of "
- "%<target%> construct");
+ "%<teams%> construct must be closely nested inside of "
+ "%<target%> construct or not nested in any OpenMP "
+ "construct");
return false;
}
break;
@@ -3090,7 +3286,6 @@ scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
- case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_GRID_BODY:
@@ -3098,12 +3293,25 @@ scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
scan_omp (gimple_omp_body_ptr (stmt), ctx);
break;
+ case GIMPLE_OMP_TASKGROUP:
+ ctx = new_omp_context (stmt, ctx);
+ scan_sharing_clauses (gimple_omp_taskgroup_clauses (stmt), ctx);
+ scan_omp (gimple_omp_body_ptr (stmt), ctx);
+ break;
+
case GIMPLE_OMP_TARGET:
scan_omp_target (as_a <gomp_target *> (stmt), ctx);
break;
case GIMPLE_OMP_TEAMS:
- scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
+ if (gimple_omp_teams_host (as_a <gomp_teams *> (stmt)))
+ {
+ taskreg_nesting_level++;
+ scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
+ taskreg_nesting_level--;
+ }
+ else
+ scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
break;
case GIMPLE_BIND:
@@ -3515,6 +3723,30 @@ handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
}
}
+/* Helper function for lower_rec_input_clauses. Emit into ilist sequence
+ code to emit (type) (tskred_temp[idx]). */
+
+static tree
+task_reduction_read (gimple_seq *ilist, tree tskred_temp, tree type,
+ unsigned idx)
+{
+ unsigned HOST_WIDE_INT sz
+ = tree_to_uhwi (TYPE_SIZE_UNIT (pointer_sized_int_node));
+ tree r = build2 (MEM_REF, pointer_sized_int_node,
+ tskred_temp, build_int_cst (TREE_TYPE (tskred_temp),
+ idx * sz));
+ tree v = create_tmp_var (pointer_sized_int_node);
+ gimple *g = gimple_build_assign (v, r);
+ gimple_seq_add_stmt (ilist, g);
+ if (!useless_type_conversion_p (type, pointer_sized_int_node))
+ {
+ v = create_tmp_var (type);
+ g = gimple_build_assign (v, NOP_EXPR, gimple_assign_lhs (g));
+ gimple_seq_add_stmt (ilist, g);
+ }
+ return v;
+}
+
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
from the receiver (aka child) side and initializers for REFERENCE_TYPE
private variables. Initialization statements go in ILIST, while calls
@@ -3558,6 +3790,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
sctx.max_vf = 1;
break;
case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
|| is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
@@ -3570,18 +3803,87 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
if (sctx.is_simt && maybe_ne (sctx.max_vf, 1U))
sctx.simt_eargs.safe_push (NULL_TREE);
+ unsigned task_reduction_cnt = 0;
+ unsigned task_reduction_cntorig = 0;
+ unsigned task_reduction_cnt_full = 0;
+ unsigned task_reduction_cntorig_full = 0;
+ unsigned task_reduction_other_cnt = 0;
+ tree tskred_atype = NULL_TREE, tskred_avar = NULL_TREE;
+ tree tskred_base = NULL_TREE, tskred_temp = NULL_TREE;
/* Do all the fixed sized types in the first pass, and the variable sized
types in the second pass. This makes sure that the scalar arguments to
the variable sized types are processed before we use them in the
- variable sized operations. */
- for (pass = 0; pass < 2; ++pass)
- {
+ variable sized operations. For task reductions we use 4 passes, in the
+ first two we ignore them, in the third one gather arguments for
+ GOMP_task_reduction_remap call and in the last pass actually handle
+ the task reductions. */
+ for (pass = 0; pass < ((task_reduction_cnt || task_reduction_other_cnt)
+ ? 4 : 2); ++pass)
+ {
+ if (pass == 2 && task_reduction_cnt)
+ {
+ tskred_atype
+ = build_array_type_nelts (ptr_type_node, task_reduction_cnt
+ + task_reduction_cntorig);
+ tskred_avar = create_tmp_var_raw (tskred_atype);
+ gimple_add_tmp_var (tskred_avar);
+ TREE_ADDRESSABLE (tskred_avar) = 1;
+ task_reduction_cnt_full = task_reduction_cnt;
+ task_reduction_cntorig_full = task_reduction_cntorig;
+ }
+ else if (pass == 3 && task_reduction_cnt)
+ {
+ x = builtin_decl_explicit (BUILT_IN_GOMP_TASK_REDUCTION_REMAP);
+ gimple *g
+ = gimple_build_call (x, 3, size_int (task_reduction_cnt),
+ size_int (task_reduction_cntorig),
+ build_fold_addr_expr (tskred_avar));
+ gimple_seq_add_stmt (ilist, g);
+ }
+ if (pass == 3 && task_reduction_other_cnt)
+ {
+ /* For reduction clauses, build
+ tskred_base = (void *) tskred_temp[2]
+ + omp_get_thread_num () * tskred_temp[1]
+ or if tskred_temp[1] is known to be constant, that constant
+ directly. This is the start of the private reduction copy block
+ for the current thread. */
+ tree v = create_tmp_var (integer_type_node);
+ x = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
+ gimple *g = gimple_build_call (x, 0);
+ gimple_call_set_lhs (g, v);
+ gimple_seq_add_stmt (ilist, g);
+ c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
+ tskred_temp = OMP_CLAUSE_DECL (c);
+ if (is_taskreg_ctx (ctx))
+ tskred_temp = lookup_decl (tskred_temp, ctx);
+ tree v2 = create_tmp_var (sizetype);
+ g = gimple_build_assign (v2, NOP_EXPR, v);
+ gimple_seq_add_stmt (ilist, g);
+ if (ctx->task_reductions[0])
+ v = fold_convert (sizetype, ctx->task_reductions[0]);
+ else
+ v = task_reduction_read (ilist, tskred_temp, sizetype, 1);
+ tree v3 = create_tmp_var (sizetype);
+ g = gimple_build_assign (v3, MULT_EXPR, v2, v);
+ gimple_seq_add_stmt (ilist, g);
+ v = task_reduction_read (ilist, tskred_temp, ptr_type_node, 2);
+ tskred_base = create_tmp_var (ptr_type_node);
+ g = gimple_build_assign (tskred_base, POINTER_PLUS_EXPR, v, v3);
+ gimple_seq_add_stmt (ilist, g);
+ }
+ task_reduction_cnt = 0;
+ task_reduction_cntorig = 0;
+ task_reduction_other_cnt = 0;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
tree var, new_var;
bool by_ref;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
+ bool task_reduction_p = false;
+ bool task_reduction_needs_orig_p = false;
+ tree cond = NULL_TREE;
switch (c_kind)
{
@@ -3590,8 +3892,10 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
continue;
break;
case OMP_CLAUSE_SHARED:
- /* Ignore shared directives in teams construct. */
- if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ /* Ignore shared directives in teams construct inside
+ of target construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
+ && !is_host_teams_ctx (ctx))
continue;
if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
{
@@ -3608,11 +3912,46 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_REDUCTION:
- if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
+ case OMP_CLAUSE_IN_REDUCTION:
+ if (is_task_ctx (ctx) || OMP_CLAUSE_REDUCTION_TASK (c))
+ {
+ task_reduction_p = true;
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
+ {
+ task_reduction_other_cnt++;
+ if (pass == 2)
+ continue;
+ }
+ else
+ task_reduction_cnt++;
+ if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
+ {
+ var = OMP_CLAUSE_DECL (c);
+ /* If var is a global variable that isn't privatized
+ in outer contexts, we don't need to look up the
+ original address, it is always the address of the
+ global variable itself. */
+ if (!DECL_P (var)
+ || omp_is_reference (var)
+ || !is_global_var
+ (maybe_lookup_decl_in_outer_ctx (var, ctx)))
+ {
+ task_reduction_needs_orig_p = true;
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
+ task_reduction_cntorig++;
+ }
+ }
+ }
+ else if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
reduction_omp_orig_ref = true;
break;
+ case OMP_CLAUSE__REDUCTEMP_:
+ if (!is_taskreg_ctx (ctx))
+ continue;
+ /* FALLTHRU */
case OMP_CLAUSE__LOOPTEMP_:
- /* Handle _looptemp_ clauses only on parallel/task. */
+ /* Handle _looptemp_/_reductemp_ clauses only on
+ parallel/task. */
if (fd)
continue;
break;
@@ -3632,7 +3971,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_ALIGNED:
- if (pass == 0)
+ if (pass != 1)
continue;
var = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
@@ -3673,8 +4012,13 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
continue;
}
+ if (task_reduction_p != (pass >= 2))
+ continue;
+
new_var = var = OMP_CLAUSE_DECL (c);
- if (c_kind == OMP_CLAUSE_REDUCTION && TREE_CODE (var) == MEM_REF)
+ if ((c_kind == OMP_CLAUSE_REDUCTION
+ || c_kind == OMP_CLAUSE_IN_REDUCTION)
+ && TREE_CODE (var) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
@@ -3701,7 +4045,8 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
continue;
}
/* C/C++ array section reductions. */
- else if (c_kind == OMP_CLAUSE_REDUCTION
+ else if ((c_kind == OMP_CLAUSE_REDUCTION
+ || c_kind == OMP_CLAUSE_IN_REDUCTION)
&& var != OMP_CLAUSE_DECL (c))
{
if (pass == 0)
@@ -3709,6 +4054,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
tree bias = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
tree orig_var = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
+
if (TREE_CODE (orig_var) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (orig_var, 1);
@@ -3729,6 +4075,47 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
}
orig_var = TREE_OPERAND (orig_var, 0);
}
+ if (pass == 2)
+ {
+ tree out = maybe_lookup_decl_in_outer_ctx (var, ctx);
+ if (is_global_var (out)
+ && TREE_CODE (TREE_TYPE (out)) != POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (out)) != REFERENCE_TYPE
+ || (TREE_CODE (TREE_TYPE (TREE_TYPE (out)))
+ != POINTER_TYPE)))
+ x = var;
+ else
+ {
+ bool by_ref = use_pointer_for_field (var, NULL);
+ x = build_receiver_ref (var, by_ref, ctx);
+ if (TREE_CODE (TREE_TYPE (var)) == REFERENCE_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_TYPE (var)))
+ == POINTER_TYPE))
+ x = build_fold_addr_expr (x);
+ }
+ if (TREE_CODE (orig_var) == INDIRECT_REF)
+ x = build_simple_mem_ref (x);
+ else if (TREE_CODE (orig_var) == ADDR_EXPR)
+ {
+ if (var == TREE_OPERAND (orig_var, 0))
+ x = build_fold_addr_expr (x);
+ }
+ bias = fold_convert (sizetype, bias);
+ x = fold_convert (ptr_type_node, x);
+ x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
+ TREE_TYPE (x), x, bias);
+ unsigned cnt = task_reduction_cnt - 1;
+ if (!task_reduction_needs_orig_p)
+ cnt += (task_reduction_cntorig_full
+ - task_reduction_cntorig);
+ else
+ cnt = task_reduction_cntorig - 1;
+ tree r = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
+ size_int (cnt), NULL_TREE, NULL_TREE);
+ gimplify_assign (r, x, ilist);
+ continue;
+ }
+
if (TREE_CODE (orig_var) == INDIRECT_REF
|| TREE_CODE (orig_var) == ADDR_EXPR)
orig_var = TREE_OPERAND (orig_var, 0);
@@ -3737,7 +4124,64 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
const char *name = get_name (orig_var);
- if (TREE_CONSTANT (v))
+ if (pass == 3)
+ {
+ tree xv = create_tmp_var (ptr_type_node);
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
+ {
+ unsigned cnt = task_reduction_cnt - 1;
+ if (!task_reduction_needs_orig_p)
+ cnt += (task_reduction_cntorig_full
+ - task_reduction_cntorig);
+ else
+ cnt = task_reduction_cntorig - 1;
+ x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
+ size_int (cnt), NULL_TREE, NULL_TREE);
+
+ gimple *g = gimple_build_assign (xv, x);
+ gimple_seq_add_stmt (ilist, g);
+ }
+ else
+ {
+ unsigned int idx = *ctx->task_reduction_map->get (c);
+ tree off;
+ if (ctx->task_reductions[1 + idx])
+ off = fold_convert (sizetype,
+ ctx->task_reductions[1 + idx]);
+ else
+ off = task_reduction_read (ilist, tskred_temp, sizetype,
+ 7 + 3 * idx + 1);
+ gimple *g = gimple_build_assign (xv, POINTER_PLUS_EXPR,
+ tskred_base, off);
+ gimple_seq_add_stmt (ilist, g);
+ }
+ x = fold_convert (build_pointer_type (boolean_type_node),
+ xv);
+ if (TREE_CONSTANT (v))
+ x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (x), x,
+ TYPE_SIZE_UNIT (type));
+ else
+ {
+ tree t = maybe_lookup_decl (v, ctx);
+ if (t)
+ v = t;
+ else
+ v = maybe_lookup_decl_in_outer_ctx (v, ctx);
+ gimplify_expr (&v, ilist, NULL, is_gimple_val,
+ fb_rvalue);
+ t = fold_build2_loc (clause_loc, PLUS_EXPR,
+ TREE_TYPE (v), v,
+ build_int_cst (TREE_TYPE (v), 1));
+ t = fold_build2_loc (clause_loc, MULT_EXPR,
+ TREE_TYPE (v), t,
+ TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (x), x, t);
+ }
+ cond = create_tmp_var (TREE_TYPE (x));
+ gimplify_assign (cond, x, ilist);
+ x = xv;
+ }
+ else if (TREE_CONSTANT (v))
{
x = create_tmp_var_raw (type, name);
gimple_add_tmp_var (x);
@@ -3799,7 +4243,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
tree new_orig_var = lookup_decl (orig_var, ctx);
tree t = build_fold_indirect_ref (new_var);
DECL_IGNORED_P (new_var) = 0;
- TREE_THIS_NOTRAP (t);
+ TREE_THIS_NOTRAP (t) = 1;
SET_DECL_VALUE_EXPR (new_orig_var, t);
DECL_HAS_VALUE_EXPR_P (new_orig_var) = 1;
}
@@ -3824,44 +4268,101 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
}
- tree y1 = create_tmp_var (ptype, NULL);
+ /* GOMP_taskgroup_reduction_register memsets the whole
+ array to zero. If the initializer is zero, we don't
+ need to initialize it again, just mark it as ever
+ used unconditionally, i.e. cond = true. */
+ if (cond
+ && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE
+ && initializer_zerop (omp_reduction_init (c,
+ TREE_TYPE (type))))
+ {
+ gimple *g = gimple_build_assign (build_simple_mem_ref (cond),
+ boolean_true_node);
+ gimple_seq_add_stmt (ilist, g);
+ continue;
+ }
+ tree end = create_artificial_label (UNKNOWN_LOCATION);
+ if (cond)
+ {
+ gimple *g;
+ if (!is_parallel_ctx (ctx))
+ {
+ tree condv = create_tmp_var (boolean_type_node);
+ g = gimple_build_assign (condv,
+ build_simple_mem_ref (cond));
+ gimple_seq_add_stmt (ilist, g);
+ tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
+ g = gimple_build_cond (NE_EXPR, condv,
+ boolean_false_node, end, lab1);
+ gimple_seq_add_stmt (ilist, g);
+ gimple_seq_add_stmt (ilist, gimple_build_label (lab1));
+ }
+ g = gimple_build_assign (build_simple_mem_ref (cond),
+ boolean_true_node);
+ gimple_seq_add_stmt (ilist, g);
+ }
+
+ tree y1 = create_tmp_var (ptype);
gimplify_assign (y1, y, ilist);
tree i2 = NULL_TREE, y2 = NULL_TREE;
tree body2 = NULL_TREE, end2 = NULL_TREE;
tree y3 = NULL_TREE, y4 = NULL_TREE;
- if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) || is_simd)
+ if (task_reduction_needs_orig_p)
{
- y2 = create_tmp_var (ptype, NULL);
- gimplify_assign (y2, y, ilist);
- tree ref = build_outer_var_ref (var, ctx);
- /* For ref build_outer_var_ref already performs this. */
- if (TREE_CODE (d) == INDIRECT_REF)
- gcc_assert (omp_is_reference (var));
- else if (TREE_CODE (d) == ADDR_EXPR)
- ref = build_fold_addr_expr (ref);
- else if (omp_is_reference (var))
- ref = build_fold_addr_expr (ref);
- ref = fold_convert_loc (clause_loc, ptype, ref);
- if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
- && OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
+ y3 = create_tmp_var (ptype);
+ tree ref;
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
+ ref = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
+ size_int (task_reduction_cnt_full
+ + task_reduction_cntorig - 1),
+ NULL_TREE, NULL_TREE);
+ else
{
- y3 = create_tmp_var (ptype, NULL);
- gimplify_assign (y3, unshare_expr (ref), ilist);
+ unsigned int idx = *ctx->task_reduction_map->get (c);
+ ref = task_reduction_read (ilist, tskred_temp, ptype,
+ 7 + 3 * idx);
}
- if (is_simd)
+ gimplify_assign (y3, ref, ilist);
+ }
+ else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) || is_simd)
+ {
+ if (pass != 3)
{
- y4 = create_tmp_var (ptype, NULL);
- gimplify_assign (y4, ref, dlist);
+ y2 = create_tmp_var (ptype);
+ gimplify_assign (y2, y, ilist);
+ }
+ if (is_simd || OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
+ {
+ tree ref = build_outer_var_ref (var, ctx);
+ /* For ref build_outer_var_ref already performs this. */
+ if (TREE_CODE (d) == INDIRECT_REF)
+ gcc_assert (omp_is_reference (var));
+ else if (TREE_CODE (d) == ADDR_EXPR)
+ ref = build_fold_addr_expr (ref);
+ else if (omp_is_reference (var))
+ ref = build_fold_addr_expr (ref);
+ ref = fold_convert_loc (clause_loc, ptype, ref);
+ if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
+ && OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
+ {
+ y3 = create_tmp_var (ptype);
+ gimplify_assign (y3, unshare_expr (ref), ilist);
+ }
+ if (is_simd)
+ {
+ y4 = create_tmp_var (ptype);
+ gimplify_assign (y4, ref, dlist);
+ }
}
}
- tree i = create_tmp_var (TREE_TYPE (v), NULL);
+ tree i = create_tmp_var (TREE_TYPE (v));
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), ilist);
tree body = create_artificial_label (UNKNOWN_LOCATION);
- tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (ilist, gimple_build_label (body));
if (y2)
{
- i2 = create_tmp_var (TREE_TYPE (v), NULL);
+ i2 = create_tmp_var (TREE_TYPE (v));
gimplify_assign (i2, build_int_cst (TREE_TYPE (v), 0), dlist);
body2 = create_artificial_label (UNKNOWN_LOCATION);
end2 = create_artificial_label (UNKNOWN_LOCATION);
@@ -3904,14 +4405,17 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 0;
- x = lang_hooks.decls.omp_clause_dtor
- (c, build_simple_mem_ref (y2));
- if (x)
+ if (y2)
{
- gimple_seq tseq = NULL;
- dtor = x;
- gimplify_stmt (&dtor, &tseq);
- gimple_seq_add_seq (dlist, tseq);
+ x = lang_hooks.decls.omp_clause_dtor
+ (c, build_simple_mem_ref (y2));
+ if (x)
+ {
+ gimple_seq tseq = NULL;
+ dtor = x;
+ gimplify_stmt (&dtor, &tseq);
+ gimple_seq_add_seq (dlist, tseq);
+ }
}
}
else
@@ -3970,6 +4474,78 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
}
continue;
}
+ else if (pass == 2)
+ {
+ if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
+ x = var;
+ else
+ {
+ bool by_ref = use_pointer_for_field (var, ctx);
+ x = build_receiver_ref (var, by_ref, ctx);
+ }
+ if (!omp_is_reference (var))
+ x = build_fold_addr_expr (x);
+ x = fold_convert (ptr_type_node, x);
+ unsigned cnt = task_reduction_cnt - 1;
+ if (!task_reduction_needs_orig_p)
+ cnt += task_reduction_cntorig_full - task_reduction_cntorig;
+ else
+ cnt = task_reduction_cntorig - 1;
+ tree r = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
+ size_int (cnt), NULL_TREE, NULL_TREE);
+ gimplify_assign (r, x, ilist);
+ continue;
+ }
+ else if (pass == 3)
+ {
+ tree type = TREE_TYPE (new_var);
+ if (!omp_is_reference (var))
+ type = build_pointer_type (type);
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
+ {
+ unsigned cnt = task_reduction_cnt - 1;
+ if (!task_reduction_needs_orig_p)
+ cnt += (task_reduction_cntorig_full
+ - task_reduction_cntorig);
+ else
+ cnt = task_reduction_cntorig - 1;
+ x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
+ size_int (cnt), NULL_TREE, NULL_TREE);
+ }
+ else
+ {
+ unsigned int idx = *ctx->task_reduction_map->get (c);
+ tree off;
+ if (ctx->task_reductions[1 + idx])
+ off = fold_convert (sizetype,
+ ctx->task_reductions[1 + idx]);
+ else
+ off = task_reduction_read (ilist, tskred_temp, sizetype,
+ 7 + 3 * idx + 1);
+ x = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
+ tskred_base, off);
+ }
+ x = fold_convert (type, x);
+ tree t;
+ if (omp_is_reference (var))
+ {
+ gimplify_assign (new_var, x, ilist);
+ t = new_var;
+ new_var = build_simple_mem_ref (new_var);
+ }
+ else
+ {
+ t = create_tmp_var (type);
+ gimplify_assign (t, x, ilist);
+ SET_DECL_VALUE_EXPR (new_var, build_simple_mem_ref (t));
+ DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ }
+ t = fold_convert (build_pointer_type (boolean_type_node), t);
+ t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
+ TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ cond = create_tmp_var (TREE_TYPE (t));
+ gimplify_assign (cond, t, ilist);
+ }
else if (is_variable_sized (var))
{
/* For variable sized types, we need to allocate the
@@ -4003,7 +4579,9 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
gimplify_assign (ptr, x, ilist);
}
}
- else if (omp_is_reference (var))
+ else if (omp_is_reference (var)
+ && (c_kind != OMP_CLAUSE_FIRSTPRIVATE
+ || !OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c)))
{
/* For references that are being privatized for Fortran,
allocate new backing storage for the new pointer
@@ -4053,7 +4631,8 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
}
- else if (c_kind == OMP_CLAUSE_REDUCTION
+ else if ((c_kind == OMP_CLAUSE_REDUCTION
+ || c_kind == OMP_CLAUSE_IN_REDUCTION)
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
if (pass == 0)
@@ -4065,8 +4644,10 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
- /* Ignore shared directives in teams construct. */
- if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ /* Ignore shared directives in teams construct inside
+ target construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
+ && !is_host_teams_ctx (ctx))
continue;
/* Shared global vars are just accessed directly. */
if (is_global_var (new_var))
@@ -4170,7 +4751,9 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_task_ctx (ctx))
{
- if (omp_is_reference (var) || is_variable_sized (var))
+ if ((omp_is_reference (var)
+ && !OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c))
+ || is_variable_sized (var))
goto do_dtor;
else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
@@ -4182,6 +4765,18 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
goto do_dtor;
}
}
+ if (OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c)
+ && omp_is_reference (var))
+ {
+ x = build_outer_var_ref (var, ctx);
+ gcc_assert (TREE_CODE (x) == MEM_REF
+ && integer_zerop (TREE_OPERAND (x, 1)));
+ x = TREE_OPERAND (x, 0);
+ x = lang_hooks.decls.omp_clause_copy_ctor
+ (c, unshare_expr (new_var), x);
+ gimplify_and_add (x, ilist);
+ goto do_dtor;
+ }
do_firstprivate:
x = build_outer_var_ref (var, ctx);
if (is_simd)
@@ -4273,6 +4868,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
goto do_dtor;
case OMP_CLAUSE__LOOPTEMP_:
+ case OMP_CLAUSE__REDUCTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
x = build_outer_var_ref (var, ctx);
x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
@@ -4288,6 +4884,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
break;
case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
/* OpenACC reductions are initialized using the
GOACC_REDUCTION internal function. */
if (is_gimple_omp_oacc (ctx->stmt))
@@ -4296,12 +4893,40 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
gimple *tseq;
- x = build_outer_var_ref (var, ctx);
+ tree ptype = TREE_TYPE (placeholder);
+ if (cond)
+ {
+ x = error_mark_node;
+ if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c)
+ && !task_reduction_needs_orig_p)
+ x = var;
+ else if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
+ {
+ tree pptype = build_pointer_type (ptype);
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
+ x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
+ size_int (task_reduction_cnt_full
+ + task_reduction_cntorig - 1),
+ NULL_TREE, NULL_TREE);
+ else
+ {
+ unsigned int idx
+ = *ctx->task_reduction_map->get (c);
+ x = task_reduction_read (ilist, tskred_temp,
+ pptype, 7 + 3 * idx);
+ }
+ x = fold_convert (pptype, x);
+ x = build_simple_mem_ref (x);
+ }
+ }
+ else
+ {
+ x = build_outer_var_ref (var, ctx);
- if (omp_is_reference (var)
- && !useless_type_conversion_p (TREE_TYPE (placeholder),
- TREE_TYPE (x)))
- x = build_fold_addr_expr_loc (clause_loc, x);
+ if (omp_is_reference (var)
+ && !useless_type_conversion_p (ptype, TREE_TYPE (x)))
+ x = build_fold_addr_expr_loc (clause_loc, x);
+ }
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
tree new_vard = new_var;
@@ -4365,9 +4990,35 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
initialization now. */
else if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
+
+ tree lab2 = NULL_TREE;
+ if (cond)
+ {
+ gimple *g;
+ if (!is_parallel_ctx (ctx))
+ {
+ tree condv = create_tmp_var (boolean_type_node);
+ tree m = build_simple_mem_ref (cond);
+ g = gimple_build_assign (condv, m);
+ gimple_seq_add_stmt (ilist, g);
+ tree lab1
+ = create_artificial_label (UNKNOWN_LOCATION);
+ lab2 = create_artificial_label (UNKNOWN_LOCATION);
+ g = gimple_build_cond (NE_EXPR, condv,
+ boolean_false_node,
+ lab2, lab1);
+ gimple_seq_add_stmt (ilist, g);
+ gimple_seq_add_stmt (ilist,
+ gimple_build_label (lab1));
+ }
+ g = gimple_build_assign (build_simple_mem_ref (cond),
+ boolean_true_node);
+ gimple_seq_add_stmt (ilist, g);
+ }
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (new_var),
- build_outer_var_ref (var, ctx));
+ cond ? NULL_TREE
+ : build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
@@ -4385,6 +5036,12 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
+ if (cond)
+ {
+ if (lab2)
+ gimple_seq_add_stmt (ilist, gimple_build_label (lab2));
+ break;
+ }
goto do_dtor;
}
else
@@ -4393,6 +5050,49 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
+ if (cond)
+ {
+ gimple *g;
+ tree lab2 = NULL_TREE;
+ /* GOMP_taskgroup_reduction_register memsets the whole
+ array to zero. If the initializer is zero, we don't
+ need to initialize it again, just mark it as ever
+ used unconditionally, i.e. cond = true. */
+ if (initializer_zerop (x))
+ {
+ g = gimple_build_assign (build_simple_mem_ref (cond),
+ boolean_true_node);
+ gimple_seq_add_stmt (ilist, g);
+ break;
+ }
+
+ /* Otherwise, emit
+ if (!cond) { cond = true; new_var = x; } */
+ if (!is_parallel_ctx (ctx))
+ {
+ tree condv = create_tmp_var (boolean_type_node);
+ tree m = build_simple_mem_ref (cond);
+ g = gimple_build_assign (condv, m);
+ gimple_seq_add_stmt (ilist, g);
+ tree lab1
+ = create_artificial_label (UNKNOWN_LOCATION);
+ lab2 = create_artificial_label (UNKNOWN_LOCATION);
+ g = gimple_build_cond (NE_EXPR, condv,
+ boolean_false_node,
+ lab2, lab1);
+ gimple_seq_add_stmt (ilist, g);
+ gimple_seq_add_stmt (ilist,
+ gimple_build_label (lab1));
+ }
+ g = gimple_build_assign (build_simple_mem_ref (cond),
+ boolean_true_node);
+ gimple_seq_add_stmt (ilist, g);
+ gimplify_assign (new_var, x, ilist);
+ if (lab2)
+ gimple_seq_add_stmt (ilist, gimple_build_label (lab2));
+ break;
+ }
+
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
@@ -4456,6 +5156,12 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
}
}
}
+ if (tskred_avar)
+ {
+ tree clobber = build_constructor (TREE_TYPE (tskred_avar), NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ gimple_seq_add_stmt (ilist, gimple_build_assign (tskred_avar, clobber));
+ }
if (known_eq (sctx.max_vf, 1U))
sctx.is_simt = false;
@@ -4587,8 +5293,9 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
{
/* Don't add any barrier for #pragma omp simd or
#pragma omp distribute. */
- if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
- || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
+ if (!is_task_ctx (ctx)
+ && (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
+ || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR))
gimple_seq_add_stmt (ilist, omp_build_barrier (NULL_TREE));
}
@@ -5078,7 +5785,8 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
/* First see if there is exactly one reduction clause. Use OMP_ATOMIC
update in that case, otherwise use a lock. */
for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
- if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ && !OMP_CLAUSE_REDUCTION_TASK (c))
{
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
|| TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
@@ -5099,7 +5807,8 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
enum tree_code code;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
- if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
+ || OMP_CLAUSE_REDUCTION_TASK (c))
continue;
enum omp_clause_code ccode = OMP_CLAUSE_REDUCTION;
@@ -5150,6 +5859,7 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
x = build2 (OMP_ATOMIC, void_type_node, addr, x);
+ OMP_ATOMIC_MEMORY_ORDER (x) = OMP_MEMORY_ORDER_RELAXED;
gimplify_and_add (x, stmt_seqp);
return;
}
@@ -5158,7 +5868,7 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
tree d = OMP_CLAUSE_DECL (c);
tree type = TREE_TYPE (d);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
- tree i = create_tmp_var (TREE_TYPE (v), NULL);
+ tree i = create_tmp_var (TREE_TYPE (v));
tree ptype = build_pointer_type (TREE_TYPE (type));
tree bias = TREE_OPERAND (d, 1);
d = TREE_OPERAND (d, 0);
@@ -5222,10 +5932,10 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
}
new_var = fold_convert_loc (clause_loc, ptype, new_var);
ref = fold_convert_loc (clause_loc, ptype, ref);
- tree m = create_tmp_var (ptype, NULL);
+ tree m = create_tmp_var (ptype);
gimplify_assign (m, new_var, stmt_seqp);
new_var = m;
- m = create_tmp_var (ptype, NULL);
+ m = create_tmp_var (ptype);
gimplify_assign (m, ref, stmt_seqp);
ref = m;
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), stmt_seqp);
@@ -5387,7 +6097,12 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_LASTPRIVATE:
+ case OMP_CLAUSE_IN_REDUCTION:
+ case OMP_CLAUSE__REDUCTEMP_:
+ break;
case OMP_CLAUSE_REDUCTION:
+ if (is_task_ctx (ctx) || OMP_CLAUSE_REDUCTION_TASK (c))
+ continue;
break;
case OMP_CLAUSE_SHARED:
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
@@ -5405,7 +6120,8 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
}
val = OMP_CLAUSE_DECL (c);
- if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION)
&& TREE_CODE (val) == MEM_REF)
{
val = TREE_OPERAND (val, 0);
@@ -5429,7 +6145,13 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
var = lookup_decl_in_outer_ctx (val, ctx_for_o);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
- && is_global_var (var))
+ && is_global_var (var)
+ && (val == OMP_CLAUSE_DECL (c)
+ || !is_task_ctx (ctx)
+ || (TREE_CODE (TREE_TYPE (val)) != POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (val)) != REFERENCE_TYPE
+ || (TREE_CODE (TREE_TYPE (TREE_TYPE (val)))
+ != POINTER_TYPE)))))
continue;
t = omp_member_access_dummy_var (var);
@@ -5457,7 +6179,8 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
continue;
}
- if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
+ if (((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
+ && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION)
|| val == OMP_CLAUSE_DECL (c))
&& is_variable_sized (val))
continue;
@@ -5476,6 +6199,7 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE__LOOPTEMP_:
+ case OMP_CLAUSE__REDUCTEMP_:
do_in = true;
break;
@@ -5495,9 +6219,15 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
break;
case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
do_in = true;
if (val == OMP_CLAUSE_DECL (c))
- do_out = !(by_ref || omp_is_reference (val));
+ {
+ if (is_task_ctx (ctx))
+ by_ref = use_pointer_for_field (val, ctx);
+ else
+ do_out = !(by_ref || omp_is_reference (val));
+ }
else
by_ref = TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE;
break;
@@ -5802,30 +6532,55 @@ maybe_catch_exception (gimple_seq body)
cancellation in the implicit barrier. */
static void
-maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
+maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple *omp_return,
+ gimple_seq *body)
{
- gimple *omp_return = gimple_seq_last_stmt (*body);
 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
 if (gimple_omp_return_nowait_p (omp_return))
 return;
- if (ctx->outer
- && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
- && ctx->outer->cancellable)
- {
- tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
- tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
- tree lhs = create_tmp_var (c_bool_type);
- gimple_omp_return_set_lhs (omp_return, lhs);
- tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
- gimple *g = gimple_build_cond (NE_EXPR, lhs,
- fold_convert (c_bool_type,
- boolean_false_node),
- ctx->outer->cancel_label, fallthru_label);
- gimple_seq_add_stmt (body, g);
- gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
+ /* Walk outward, skipping enclosing taskgroups, to find a cancellable
+ parallel; any other intervening construct stops the search.  */
+ for (omp_context *outer = ctx->outer; outer; outer = outer->outer)
+ if (gimple_code (outer->stmt) == GIMPLE_OMP_PARALLEL
+ && outer->cancellable)
+ {
+ tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
+ tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
+ tree lhs = create_tmp_var (c_bool_type);
+ gimple_omp_return_set_lhs (omp_return, lhs);
+ tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
+ gimple *g = gimple_build_cond (NE_EXPR, lhs,
+ fold_convert (c_bool_type,
+ boolean_false_node),
+ outer->cancel_label, fallthru_label);
+ gimple_seq_add_stmt (body, g);
+ gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
+ }
+ else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP)
+ return;
+}
+
+/* Find the first task_reduction or reduction clause or return NULL
+ if there are none. */
+
+static inline tree
+omp_task_reductions_find_first (tree clauses, enum tree_code code,
+ enum omp_clause_code ccode)
+{
+ while (1)
+ {
+ clauses = omp_find_clause (clauses, ccode);
+ if (clauses == NULL_TREE)
+ return NULL_TREE;
+ /* For OMP_CLAUSE_REDUCTION on constructs other than taskloop,
+ only clauses with the task modifier qualify.  */
+ if (ccode != OMP_CLAUSE_REDUCTION
+ || code == OMP_TASKLOOP
+ || OMP_CLAUSE_REDUCTION_TASK (clauses))
+ return clauses;
+ clauses = OMP_CLAUSE_CHAIN (clauses);
 }
}
+static void lower_omp_task_reductions (omp_context *, enum tree_code, tree,
+ gimple_seq *, gimple_seq *);
+
/* Lower the OpenMP sections directive in the current statement in GSI_P.
CTX is the enclosing OMP context for the current statement. */
@@ -5837,7 +6592,7 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gomp_sections *stmt;
gimple *t;
gbind *new_stmt, *bind;
- gimple_seq ilist, dlist, olist, new_body;
+ gimple_seq ilist, dlist, olist, tred_dlist = NULL, new_body;
stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));
@@ -5845,6 +6600,27 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
dlist = NULL;
ilist = NULL;
+
+ tree rclauses
+ = omp_task_reductions_find_first (gimple_omp_sections_clauses (stmt),
+ OMP_SECTIONS, OMP_CLAUSE_REDUCTION);
+ tree rtmp = NULL_TREE;
+ if (rclauses)
+ {
+ tree type = build_pointer_type (pointer_sized_int_node);
+ tree temp = create_tmp_var (type);
+ tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
+ OMP_CLAUSE_DECL (c) = temp;
+ OMP_CLAUSE_CHAIN (c) = gimple_omp_sections_clauses (stmt);
+ gimple_omp_sections_set_clauses (stmt, c);
+ lower_omp_task_reductions (ctx, OMP_SECTIONS,
+ gimple_omp_sections_clauses (stmt),
+ &ilist, &tred_dlist);
+ rclauses = c;
+ rtmp = make_ssa_name (type);
+ gimple_seq_add_stmt (&ilist, gimple_build_assign (rtmp, temp));
+ }
+
lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
&ilist, &dlist, ctx, NULL);
@@ -5916,7 +6692,11 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
OMP_CLAUSE_NOWAIT) != NULL_TREE;
t = gimple_build_omp_return (nowait);
gimple_seq_add_stmt (&new_body, t);
- maybe_add_implicit_barrier_cancel (ctx, &new_body);
+ gimple_seq_add_seq (&new_body, tred_dlist);
+ maybe_add_implicit_barrier_cancel (ctx, t, &new_body);
+
+ if (rclauses)
+ OMP_CLAUSE_DECL (rclauses) = rtmp;
gimple_bind_set_body (new_stmt, new_body);
}
@@ -6078,7 +6858,7 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
OMP_CLAUSE_NOWAIT) != NULL_TREE;
gimple *g = gimple_build_omp_return (nowait);
gimple_seq_add_stmt (&bind_body_tail, g);
- maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
+ maybe_add_implicit_barrier_cancel (ctx, g, &bind_body_tail);
if (ctx->record_type)
{
gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
@@ -6140,6 +6920,604 @@ lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
BLOCK_VARS (block) = ctx->block_vars;
}
+/* Helper function for lower_omp_task_reductions. For a specific PASS
+ find out the current clause it should be processed, or return false
+ if all have been processed already. */
+
+static inline bool
+omp_task_reduction_iterate (int pass, enum tree_code code,
+ enum omp_clause_code ccode, tree *c, tree *decl,
+ tree *type, tree *next)
+{
+ for (; *c; *c = omp_find_clause (OMP_CLAUSE_CHAIN (*c), ccode))
+ {
+ /* Same filtering as omp_task_reductions_find_first: plain
+ reductions without the task modifier don't count here.  */
+ if (ccode == OMP_CLAUSE_REDUCTION
+ && code != OMP_TASKLOOP
+ && !OMP_CLAUSE_REDUCTION_TASK (*c))
+ continue;
+ *decl = OMP_CLAUSE_DECL (*c);
+ *type = TREE_TYPE (*decl);
+ if (TREE_CODE (*decl) == MEM_REF)
+ {
+ /* Array sections are handled in pass 1 only.  */
+ if (pass != 1)
+ continue;
+ }
+ else
+ {
+ if (omp_is_reference (*decl))
+ *type = TREE_TYPE (*type);
+ /* Constant-sized types go in pass 0, variable-sized in pass 1.  */
+ if (pass != (!TREE_CONSTANT (TYPE_SIZE_UNIT (*type))))
+ continue;
+ }
+ *next = omp_find_clause (OMP_CLAUSE_CHAIN (*c), ccode);
+ return true;
+ }
+ *decl = NULL_TREE;
+ *type = NULL_TREE;
+ *next = NULL_TREE;
+ return false;
+}
+
+/* Lower task_reduction and reduction clauses (the latter unless CODE is
+ OMP_TASKGROUP only with task modifier). Register mapping of those in
+ START sequence and reducing them and unregister them in the END sequence. */
+
+static void
+lower_omp_task_reductions (omp_context *ctx, enum tree_code code, tree clauses,
+ gimple_seq *start, gimple_seq *end)
+{
+ enum omp_clause_code ccode
+ = (code == OMP_TASKGROUP
+ ? OMP_CLAUSE_TASK_REDUCTION : OMP_CLAUSE_REDUCTION);
+ tree cancellable = NULL_TREE;
+ clauses = omp_task_reductions_find_first (clauses, code, ccode);
+ if (clauses == NULL_TREE)
+ return;
+ if (code == OMP_FOR || code == OMP_SECTIONS)
+ {
+ /* Worksharing constructs inside a cancellable parallel (possibly
+ through taskgroups) need extra cancellation bookkeeping below.  */
+ for (omp_context *outer = ctx->outer; outer; outer = outer->outer)
+ if (gimple_code (outer->stmt) == GIMPLE_OMP_PARALLEL
+ && outer->cancellable)
+ {
+ cancellable = error_mark_node;
+ break;
+ }
+ else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP)
+ break;
+ }
+ tree record_type = lang_hooks.types.make_type (RECORD_TYPE);
+ tree *last = &TYPE_FIELDS (record_type);
+ unsigned cnt = 0;
+ if (cancellable)
+ {
+ tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
+ ptr_type_node);
+ tree ifield = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
+ integer_type_node);
+ *last = field;
+ DECL_CHAIN (field) = ifield;
+ last = &DECL_CHAIN (ifield);
+ }
+ /* Build the per-thread record: one data field plus one bool
+ "ever used" flag field per clause, in two passes matching
+ omp_task_reduction_iterate's ordering.  */
+ for (int pass = 0; pass < 2; pass++)
+ {
+ tree decl, type, next;
+ for (tree c = clauses;
+ omp_task_reduction_iterate (pass, code, ccode,
+ &c, &decl, &type, &next); c = next)
+ {
+ ++cnt;
+ tree new_type = type;
+ if (ctx->outer)
+ new_type = remap_type (type, &ctx->outer->cb);
+ tree field
+ = build_decl (OMP_CLAUSE_LOCATION (c), FIELD_DECL,
+ DECL_P (decl) ? DECL_NAME (decl) : NULL_TREE,
+ new_type);
+ if (DECL_P (decl) && type == TREE_TYPE (decl))
+ {
+ SET_DECL_ALIGN (field, DECL_ALIGN (decl));
+ DECL_USER_ALIGN (field) = DECL_USER_ALIGN (decl);
+ TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (decl);
+ }
+ else
+ SET_DECL_ALIGN (field, TYPE_ALIGN (type));
+ DECL_CONTEXT (field) = record_type;
+ *last = field;
+ last = &DECL_CHAIN (field);
+ tree bfield
+ = build_decl (OMP_CLAUSE_LOCATION (c), FIELD_DECL, NULL_TREE,
+ boolean_type_node);
+ DECL_CONTEXT (bfield) = record_type;
+ *last = bfield;
+ last = &DECL_CHAIN (bfield);
+ }
+ }
+ *last = NULL_TREE;
+ layout_type (record_type);
+
+ /* Build up an array which registers with the runtime all the reductions
+ and deregisters them at the end. Format documented in libgomp/task.c. */
+ tree atype = build_array_type_nelts (pointer_sized_int_node, 7 + cnt * 3);
+ tree avar = create_tmp_var_raw (atype);
+ gimple_add_tmp_var (avar);
+ TREE_ADDRESSABLE (avar) = 1;
+ tree r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_zero_node,
+ NULL_TREE, NULL_TREE);
+ tree t = build_int_cst (pointer_sized_int_node, cnt);
+ gimple_seq_add_stmt (start, gimple_build_assign (r, t));
+ gimple_seq seq = NULL;
+ tree sz = fold_convert (pointer_sized_int_node,
+ TYPE_SIZE_UNIT (record_type));
+ /* Round the per-thread chunk size up to a cache line multiple.  */
+ int cachesz = 64;
+ sz = fold_build2 (PLUS_EXPR, pointer_sized_int_node, sz,
+ build_int_cst (pointer_sized_int_node, cachesz - 1));
+ sz = fold_build2 (BIT_AND_EXPR, pointer_sized_int_node, sz,
+ build_int_cst (pointer_sized_int_node, ~(cachesz - 1)));
+ ctx->task_reductions.create (1 + cnt);
+ ctx->task_reduction_map = new hash_map<tree, unsigned>;
+ ctx->task_reductions.quick_push (TREE_CODE (sz) == INTEGER_CST
+ ? sz : NULL_TREE);
+ sz = force_gimple_operand (sz, &seq, true, NULL_TREE);
+ gimple_seq_add_seq (start, seq);
+ r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_one_node,
+ NULL_TREE, NULL_TREE);
+ gimple_seq_add_stmt (start, gimple_build_assign (r, sz));
+ r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (2),
+ NULL_TREE, NULL_TREE);
+ t = build_int_cst (pointer_sized_int_node,
+ MAX (TYPE_ALIGN_UNIT (record_type), (unsigned) cachesz));
+ gimple_seq_add_stmt (start, gimple_build_assign (r, t));
+ r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (3),
+ NULL_TREE, NULL_TREE);
+ t = build_int_cst (pointer_sized_int_node, -1);
+ gimple_seq_add_stmt (start, gimple_build_assign (r, t));
+ r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (4),
+ NULL_TREE, NULL_TREE);
+ t = build_int_cst (pointer_sized_int_node, 0);
+ gimple_seq_add_stmt (start, gimple_build_assign (r, t));
+
+ /* In end, build a loop that iterates from 0 to < omp_get_num_threads ()
+ and for each task reduction checks a bool right after the private variable
+ within that thread's chunk; if the bool is clear, it hasn't been
+ initialized and thus isn't going to be reduced nor destructed, otherwise
+ reduce and destruct it. */
+ tree idx = create_tmp_var (size_type_node);
+ gimple_seq_add_stmt (end, gimple_build_assign (idx, size_zero_node));
+ tree num_thr_sz = create_tmp_var (size_type_node);
+ tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
+ tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
+ tree lab3 = NULL_TREE;
+ gimple *g;
+ if (code == OMP_FOR || code == OMP_SECTIONS)
+ {
+ /* For worksharing constructs, only perform it in the master thread,
+ with the exception of cancelled implicit barriers - then only handle
+ the current thread. */
+ tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
+ t = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
+ tree thr_num = create_tmp_var (integer_type_node);
+ g = gimple_build_call (t, 0);
+ gimple_call_set_lhs (g, thr_num);
+ gimple_seq_add_stmt (end, g);
+ if (cancellable)
+ {
+ tree c;
+ tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
+ tree lab6 = create_artificial_label (UNKNOWN_LOCATION);
+ lab3 = create_artificial_label (UNKNOWN_LOCATION);
+ if (code == OMP_FOR)
+ c = gimple_omp_for_clauses (ctx->stmt);
+ else if (code == OMP_SECTIONS)
+ c = gimple_omp_sections_clauses (ctx->stmt);
+ c = OMP_CLAUSE_DECL (omp_find_clause (c, OMP_CLAUSE__REDUCTEMP_));
+ cancellable = c;
+ g = gimple_build_cond (NE_EXPR, c, build_zero_cst (TREE_TYPE (c)),
+ lab5, lab6);
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_label (lab5));
+ /* Cancelled: reduce only this thread's chunk, i.e. iterate
+ from thr_num to thr_num + 1.  */
+ g = gimple_build_assign (idx, NOP_EXPR, thr_num);
+ gimple_seq_add_stmt (end, g);
+ g = gimple_build_assign (num_thr_sz, PLUS_EXPR, idx,
+ build_one_cst (TREE_TYPE (idx)));
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_goto (lab3));
+ gimple_seq_add_stmt (end, gimple_build_label (lab6));
+ }
+ g = gimple_build_cond (NE_EXPR, thr_num, integer_zero_node, lab2, lab4);
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_label (lab4));
+ }
+ if (code != OMP_PARALLEL)
+ {
+ t = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
+ tree num_thr = create_tmp_var (integer_type_node);
+ g = gimple_build_call (t, 0);
+ gimple_call_set_lhs (g, num_thr);
+ gimple_seq_add_stmt (end, g);
+ g = gimple_build_assign (num_thr_sz, NOP_EXPR, num_thr);
+ gimple_seq_add_stmt (end, g);
+ if (cancellable)
+ gimple_seq_add_stmt (end, gimple_build_label (lab3));
+ }
+ else
+ {
+ tree c = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
+ OMP_CLAUSE__REDUCTEMP_);
+ t = fold_convert (pointer_sized_int_node, OMP_CLAUSE_DECL (c));
+ t = fold_convert (size_type_node, t);
+ gimplify_assign (num_thr_sz, t, end);
+ }
+ t = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (2),
+ NULL_TREE, NULL_TREE);
+ tree data = create_tmp_var (pointer_sized_int_node);
+ gimple_seq_add_stmt (end, gimple_build_assign (data, t));
+ gimple_seq_add_stmt (end, gimple_build_label (lab1));
+ tree ptr;
+ if (TREE_CODE (TYPE_SIZE_UNIT (record_type)) == INTEGER_CST)
+ ptr = create_tmp_var (build_pointer_type (record_type));
+ else
+ ptr = create_tmp_var (ptr_type_node);
+ gimple_seq_add_stmt (end, gimple_build_assign (ptr, NOP_EXPR, data));
+
+ tree field = TYPE_FIELDS (record_type);
+ cnt = 0;
+ if (cancellable)
+ field = DECL_CHAIN (DECL_CHAIN (field));
+ /* Same two-pass walk as above, so FIELD stays in sync with the
+ clause being processed.  */
+ for (int pass = 0; pass < 2; pass++)
+ {
+ tree decl, type, next;
+ for (tree c = clauses;
+ omp_task_reduction_iterate (pass, code, ccode,
+ &c, &decl, &type, &next); c = next)
+ {
+ tree var = decl, ref;
+ if (TREE_CODE (decl) == MEM_REF)
+ {
+ var = TREE_OPERAND (var, 0);
+ if (TREE_CODE (var) == POINTER_PLUS_EXPR)
+ var = TREE_OPERAND (var, 0);
+ tree v = var;
+ if (TREE_CODE (var) == ADDR_EXPR)
+ var = TREE_OPERAND (var, 0);
+ else if (TREE_CODE (var) == INDIRECT_REF)
+ var = TREE_OPERAND (var, 0);
+ tree orig_var = var;
+ if (is_variable_sized (var))
+ {
+ gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
+ var = DECL_VALUE_EXPR (var);
+ gcc_assert (TREE_CODE (var) == INDIRECT_REF);
+ var = TREE_OPERAND (var, 0);
+ gcc_assert (DECL_P (var));
+ }
+ t = ref = maybe_lookup_decl_in_outer_ctx (var, ctx);
+ if (orig_var != var)
+ gcc_assert (TREE_CODE (v) == ADDR_EXPR);
+ else if (TREE_CODE (v) == ADDR_EXPR)
+ t = build_fold_addr_expr (t);
+ else if (TREE_CODE (v) == INDIRECT_REF)
+ t = build_fold_indirect_ref (t);
+ if (TREE_CODE (TREE_OPERAND (decl, 0)) == POINTER_PLUS_EXPR)
+ {
+ tree b = TREE_OPERAND (TREE_OPERAND (decl, 0), 1);
+ b = maybe_lookup_decl_in_outer_ctx (b, ctx);
+ t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, b);
+ }
+ if (!integer_zerop (TREE_OPERAND (decl, 1)))
+ t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
+ fold_convert (size_type_node,
+ TREE_OPERAND (decl, 1)));
+ }
+ else
+ {
+ t = ref = maybe_lookup_decl_in_outer_ctx (var, ctx);
+ if (!omp_is_reference (decl))
+ t = build_fold_addr_expr (t);
+ }
+ t = fold_convert (pointer_sized_int_node, t);
+ seq = NULL;
+ t = force_gimple_operand (t, &seq, true, NULL_TREE);
+ gimple_seq_add_seq (start, seq);
+ r = build4 (ARRAY_REF, pointer_sized_int_node, avar,
+ size_int (7 + cnt * 3), NULL_TREE, NULL_TREE);
+ gimple_seq_add_stmt (start, gimple_build_assign (r, t));
+ t = unshare_expr (byte_position (field));
+ t = fold_convert (pointer_sized_int_node, t);
+ ctx->task_reduction_map->put (c, cnt);
+ ctx->task_reductions.quick_push (TREE_CODE (t) == INTEGER_CST
+ ? t : NULL_TREE);
+ seq = NULL;
+ t = force_gimple_operand (t, &seq, true, NULL_TREE);
+ gimple_seq_add_seq (start, seq);
+ r = build4 (ARRAY_REF, pointer_sized_int_node, avar,
+ size_int (7 + cnt * 3 + 1), NULL_TREE, NULL_TREE);
+ gimple_seq_add_stmt (start, gimple_build_assign (r, t));
+
+ tree bfield = DECL_CHAIN (field);
+ tree cond;
+ if (code == OMP_PARALLEL || code == OMP_FOR || code == OMP_SECTIONS)
+ /* In parallel or worksharing all threads unconditionally
+ initialize all their task reduction private variables. */
+ cond = boolean_true_node;
+ else if (TREE_TYPE (ptr) == ptr_type_node)
+ {
+ cond = build2 (POINTER_PLUS_EXPR, ptr_type_node, ptr,
+ unshare_expr (byte_position (bfield)));
+ seq = NULL;
+ cond = force_gimple_operand (cond, &seq, true, NULL_TREE);
+ gimple_seq_add_seq (end, seq);
+ tree pbool = build_pointer_type (TREE_TYPE (bfield));
+ cond = build2 (MEM_REF, TREE_TYPE (bfield), cond,
+ build_int_cst (pbool, 0));
+ }
+ else
+ cond = build3 (COMPONENT_REF, TREE_TYPE (bfield),
+ build_simple_mem_ref (ptr), bfield, NULL_TREE);
+ tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
+ tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
+ tree condv = create_tmp_var (boolean_type_node);
+ gimple_seq_add_stmt (end, gimple_build_assign (condv, cond));
+ g = gimple_build_cond (NE_EXPR, condv, boolean_false_node,
+ lab3, lab4);
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_label (lab3));
+ if (cancellable && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE)
+ {
+ /* If this reduction doesn't need destruction and parallel
+ has been cancelled, there is nothing to do for this
+ reduction, so jump around the merge operation. */
+ tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
+ g = gimple_build_cond (NE_EXPR, cancellable,
+ build_zero_cst (TREE_TYPE (cancellable)),
+ lab4, lab5);
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_label (lab5));
+ }
+
+ tree new_var;
+ if (TREE_TYPE (ptr) == ptr_type_node)
+ {
+ new_var = build2 (POINTER_PLUS_EXPR, ptr_type_node, ptr,
+ unshare_expr (byte_position (field)));
+ seq = NULL;
+ new_var = force_gimple_operand (new_var, &seq, true, NULL_TREE);
+ gimple_seq_add_seq (end, seq);
+ tree pbool = build_pointer_type (TREE_TYPE (field));
+ new_var = build2 (MEM_REF, TREE_TYPE (field), new_var,
+ build_int_cst (pbool, 0));
+ }
+ else
+ new_var = build3 (COMPONENT_REF, TREE_TYPE (field),
+ build_simple_mem_ref (ptr), field, NULL_TREE);
+
+ enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
+ if (TREE_CODE (decl) != MEM_REF && omp_is_reference (decl))
+ ref = build_simple_mem_ref (ref);
+ /* reduction(-:var) sums up the partial results, so it acts
+ identically to reduction(+:var). */
+ if (rcode == MINUS_EXPR)
+ rcode = PLUS_EXPR;
+ if (TREE_CODE (decl) == MEM_REF)
+ {
+ tree type = TREE_TYPE (new_var);
+ tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
+ tree i = create_tmp_var (TREE_TYPE (v));
+ tree ptype = build_pointer_type (TREE_TYPE (type));
+ if (DECL_P (v))
+ {
+ v = maybe_lookup_decl_in_outer_ctx (v, ctx);
+ tree vv = create_tmp_var (TREE_TYPE (v));
+ gimplify_assign (vv, v, start);
+ v = vv;
+ }
+ ref = build4 (ARRAY_REF, pointer_sized_int_node, avar,
+ size_int (7 + cnt * 3), NULL_TREE, NULL_TREE);
+ new_var = build_fold_addr_expr (new_var);
+ new_var = fold_convert (ptype, new_var);
+ ref = fold_convert (ptype, ref);
+ tree m = create_tmp_var (ptype);
+ gimplify_assign (m, new_var, end);
+ new_var = m;
+ m = create_tmp_var (ptype);
+ gimplify_assign (m, ref, end);
+ ref = m;
+ gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), end);
+ tree body = create_artificial_label (UNKNOWN_LOCATION);
+ tree endl = create_artificial_label (UNKNOWN_LOCATION);
+ gimple_seq_add_stmt (end, gimple_build_label (body));
+ tree priv = build_simple_mem_ref (new_var);
+ tree out = build_simple_mem_ref (ref);
+ if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
+ {
+ tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
+ tree decl_placeholder
+ = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
+ tree lab6 = NULL_TREE;
+ if (cancellable)
+ {
+ /* If this reduction needs destruction and parallel
+ has been cancelled, jump around the merge operation
+ to the destruction. */
+ tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
+ lab6 = create_artificial_label (UNKNOWN_LOCATION);
+ tree zero = build_zero_cst (TREE_TYPE (cancellable));
+ g = gimple_build_cond (NE_EXPR, cancellable, zero,
+ lab6, lab5);
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_label (lab5));
+ }
+ SET_DECL_VALUE_EXPR (placeholder, out);
+ DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
+ SET_DECL_VALUE_EXPR (decl_placeholder, priv);
+ DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
+ lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
+ gimple_seq_add_seq (end,
+ OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
+ OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
+ {
+ OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
+ OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
+ }
+ if (cancellable)
+ gimple_seq_add_stmt (end, gimple_build_label (lab6));
+ tree x = lang_hooks.decls.omp_clause_dtor (c, priv);
+ if (x)
+ {
+ gimple_seq tseq = NULL;
+ gimplify_stmt (&x, &tseq);
+ gimple_seq_add_seq (end, tseq);
+ }
+ }
+ else
+ {
+ tree x = build2 (rcode, TREE_TYPE (out), out, priv);
+ out = unshare_expr (out);
+ gimplify_assign (out, x, end);
+ }
+ gimple *g
+ = gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
+ TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ gimple_seq_add_stmt (end, g);
+ g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
+ TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ gimple_seq_add_stmt (end, g);
+ g = gimple_build_assign (i, PLUS_EXPR, i,
+ build_int_cst (TREE_TYPE (i), 1));
+ gimple_seq_add_stmt (end, g);
+ g = gimple_build_cond (LE_EXPR, i, v, body, endl);
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_label (endl));
+ }
+ else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
+ {
+ tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
+ tree oldv = NULL_TREE;
+ tree lab6 = NULL_TREE;
+ if (cancellable)
+ {
+ /* If this reduction needs destruction and parallel
+ has been cancelled, jump around the merge operation
+ to the destruction. */
+ tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
+ lab6 = create_artificial_label (UNKNOWN_LOCATION);
+ tree zero = build_zero_cst (TREE_TYPE (cancellable));
+ g = gimple_build_cond (NE_EXPR, cancellable, zero,
+ lab6, lab5);
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_label (lab5));
+ }
+ if (omp_is_reference (decl)
+ && !useless_type_conversion_p (TREE_TYPE (placeholder),
+ TREE_TYPE (ref)))
+ ref = build_fold_addr_expr_loc (OMP_CLAUSE_LOCATION (c), ref);
+ ref = build_fold_addr_expr_loc (OMP_CLAUSE_LOCATION (c), ref);
+ tree refv = create_tmp_var (TREE_TYPE (ref));
+ gimplify_assign (refv, ref, end);
+ ref = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), refv);
+ SET_DECL_VALUE_EXPR (placeholder, ref);
+ DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
+ tree d = maybe_lookup_decl (decl, ctx);
+ gcc_assert (d);
+ if (DECL_HAS_VALUE_EXPR_P (d))
+ oldv = DECL_VALUE_EXPR (d);
+ if (omp_is_reference (var))
+ {
+ tree v = fold_convert (TREE_TYPE (d),
+ build_fold_addr_expr (new_var));
+ SET_DECL_VALUE_EXPR (d, v);
+ }
+ else
+ SET_DECL_VALUE_EXPR (d, new_var);
+ DECL_HAS_VALUE_EXPR_P (d) = 1;
+ lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
+ if (oldv)
+ SET_DECL_VALUE_EXPR (d, oldv);
+ else
+ {
+ SET_DECL_VALUE_EXPR (d, NULL_TREE);
+ DECL_HAS_VALUE_EXPR_P (d) = 0;
+ }
+ gimple_seq_add_seq (end, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
+ OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
+ OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
+ if (cancellable)
+ gimple_seq_add_stmt (end, gimple_build_label (lab6));
+ tree x = lang_hooks.decls.omp_clause_dtor (c, new_var);
+ if (x)
+ {
+ gimple_seq tseq = NULL;
+ gimplify_stmt (&x, &tseq);
+ gimple_seq_add_seq (end, tseq);
+ }
+ }
+ else
+ {
+ tree x = build2 (rcode, TREE_TYPE (ref), ref, new_var);
+ ref = unshare_expr (ref);
+ gimplify_assign (ref, x, end);
+ }
+ gimple_seq_add_stmt (end, gimple_build_label (lab4));
+ ++cnt;
+ field = DECL_CHAIN (bfield);
+ }
+ }
+
+ /* Hand the descriptor array to the runtime: taskgroup registers it
+ directly, other constructs stash its address in the _reductemp_
+ clause decl for their runtime call.  */
+ if (code == OMP_TASKGROUP)
+ {
+ t = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_REDUCTION_REGISTER);
+ g = gimple_build_call (t, 1, build_fold_addr_expr (avar));
+ gimple_seq_add_stmt (start, g);
+ }
+ else
+ {
+ tree c;
+ if (code == OMP_FOR)
+ c = gimple_omp_for_clauses (ctx->stmt);
+ else if (code == OMP_SECTIONS)
+ c = gimple_omp_sections_clauses (ctx->stmt);
+ else
+ c = gimple_omp_taskreg_clauses (ctx->stmt);
+ c = omp_find_clause (c, OMP_CLAUSE__REDUCTEMP_);
+ t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (c)),
+ build_fold_addr_expr (avar));
+ gimplify_assign (OMP_CLAUSE_DECL (c), t, start);
+ }
+
+ /* Advance DATA to the next thread's chunk and loop until all
+ NUM_THR_SZ chunks have been reduced.  */
+ gimple_seq_add_stmt (end, gimple_build_assign (data, PLUS_EXPR, data, sz));
+ gimple_seq_add_stmt (end, gimple_build_assign (idx, PLUS_EXPR, idx,
+ size_one_node));
+ g = gimple_build_cond (NE_EXPR, idx, num_thr_sz, lab1, lab2);
+ gimple_seq_add_stmt (end, g);
+ gimple_seq_add_stmt (end, gimple_build_label (lab2));
+ if (code == OMP_FOR || code == OMP_SECTIONS)
+ {
+ enum built_in_function bfn
+ = BUILT_IN_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER;
+ t = builtin_decl_explicit (bfn);
+ tree c_bool_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (t)));
+ tree arg;
+ if (cancellable)
+ {
+ arg = create_tmp_var (c_bool_type);
+ gimple_seq_add_stmt (end, gimple_build_assign (arg, NOP_EXPR,
+ cancellable));
+ }
+ else
+ arg = build_int_cst (c_bool_type, 0);
+ g = gimple_build_call (t, 1, arg);
+ }
+ else
+ {
+ t = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_REDUCTION_UNREGISTER);
+ g = gimple_build_call (t, 1, build_fold_addr_expr (avar));
+ }
+ gimple_seq_add_stmt (end, g);
+ t = build_constructor (atype, NULL);
+ TREE_THIS_VOLATILE (t) = 1;
+ gimple_seq_add_stmt (end, gimple_build_assign (avar, t));
+}
/* Expand code for an OpenMP taskgroup directive. */
@@ -6149,21 +7527,31 @@ lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple *stmt = gsi_stmt (*gsi_p);
gcall *x;
gbind *bind;
+ gimple_seq dseq = NULL;
tree block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
+ push_gimplify_context ();
+
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
0);
gimple_bind_add_stmt (bind, x);
+ lower_omp_task_reductions (ctx, OMP_TASKGROUP,
+ gimple_omp_taskgroup_clauses (stmt),
+ gimple_bind_body_ptr (bind), &dseq);
+
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
+ gimple_bind_add_seq (bind, dseq);
+
+ pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
@@ -6752,7 +8140,8 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
struct omp_for_data fd, *fdp = NULL;
gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
gbind *new_stmt;
- gimple_seq omp_for_body, body, dlist;
+ gimple_seq omp_for_body, body, dlist, tred_ilist = NULL, tred_dlist = NULL;
+ gimple_seq cnt_list = NULL;
gimple_seq oacc_head = NULL, oacc_tail = NULL;
size_t i;
@@ -6845,9 +8234,30 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
/* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
dlist = NULL;
body = NULL;
+ tree rclauses
+ = omp_task_reductions_find_first (gimple_omp_for_clauses (stmt), OMP_FOR,
+ OMP_CLAUSE_REDUCTION);
+ tree rtmp = NULL_TREE;
+ if (rclauses)
+ {
+ tree type = build_pointer_type (pointer_sized_int_node);
+ tree temp = create_tmp_var (type);
+ tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
+ OMP_CLAUSE_DECL (c) = temp;
+ OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (stmt);
+ gimple_omp_for_set_clauses (stmt, c);
+ lower_omp_task_reductions (ctx, OMP_FOR,
+ gimple_omp_for_clauses (stmt),
+ &tred_ilist, &tred_dlist);
+ rclauses = c;
+ rtmp = make_ssa_name (type);
+ gimple_seq_add_stmt (&body, gimple_build_assign (rtmp, temp));
+ }
+
lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
fdp);
- gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
+ gimple_seq_add_seq (rclauses ? &tred_ilist : &body,
+ gimple_omp_for_pre_body (stmt));
lower_omp (gimple_omp_body_ptr (stmt), ctx);
@@ -6862,20 +8272,24 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
rhs_p = gimple_omp_for_initial_ptr (stmt, i);
if (!is_gimple_min_invariant (*rhs_p))
- *rhs_p = get_formal_tmp_var (*rhs_p, &body);
+ *rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (*rhs_p);
rhs_p = gimple_omp_for_final_ptr (stmt, i);
if (!is_gimple_min_invariant (*rhs_p))
- *rhs_p = get_formal_tmp_var (*rhs_p, &body);
+ *rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (*rhs_p);
rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
if (!is_gimple_min_invariant (*rhs_p))
- *rhs_p = get_formal_tmp_var (*rhs_p, &body);
+ *rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
}
+ if (rclauses)
+ gimple_seq_add_seq (&tred_ilist, cnt_list);
+ else
+ gimple_seq_add_seq (&body, cnt_list);
/* Once lowered, extract the bounds and clauses. */
omp_extract_for_data (stmt, &fd, NULL);
@@ -6922,13 +8336,26 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_seq_add_seq (&body, dlist);
+ if (rclauses)
+ {
+ gimple_seq_add_seq (&tred_ilist, body);
+ body = tred_ilist;
+ }
+
body = maybe_catch_exception (body);
if (!phony_loop)
{
/* Region exit marker goes at the end of the loop body. */
- gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
- maybe_add_implicit_barrier_cancel (ctx, &body);
+ gimple *g = gimple_build_omp_return (fd.have_nowait);
+ gimple_seq_add_stmt (&body, g);
+
+ gimple_seq_add_seq (&body, tred_dlist);
+
+ maybe_add_implicit_barrier_cancel (ctx, g, &body);
+
+ if (rclauses)
+ OMP_CLAUSE_DECL (rclauses) = rtmp;
}
/* Add OpenACC joining and reduction markers just after the loop. */
@@ -7153,6 +8580,40 @@ create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
break;
+ case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
+ decl = OMP_CLAUSE_DECL (c);
+ if (TREE_CODE (decl) == MEM_REF)
+ {
+ decl = TREE_OPERAND (decl, 0);
+ if (TREE_CODE (decl) == POINTER_PLUS_EXPR)
+ decl = TREE_OPERAND (decl, 0);
+ if (TREE_CODE (decl) == INDIRECT_REF
+ || TREE_CODE (decl) == ADDR_EXPR)
+ decl = TREE_OPERAND (decl, 0);
+ }
+ key = (splay_tree_key) decl;
+ n = splay_tree_lookup (ctx->field_map, key);
+ if (n == NULL)
+ break;
+ f = (tree) n->value;
+ if (tcctx.cb.decl_map)
+ f = *tcctx.cb.decl_map->get (f);
+ n = splay_tree_lookup (ctx->sfield_map, key);
+ sf = (tree) n->value;
+ if (tcctx.cb.decl_map)
+ sf = *tcctx.cb.decl_map->get (sf);
+ src = build_simple_mem_ref_loc (loc, sarg);
+ src = omp_build_component_ref (src, sf);
+ if (decl != OMP_CLAUSE_DECL (c)
+ && TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == POINTER_TYPE)
+ src = build_simple_mem_ref_loc (loc, src);
+ dst = build_simple_mem_ref_loc (loc, arg);
+ dst = omp_build_component_ref (dst, f);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
+ append_to_statement_list (t, &list);
+ break;
case OMP_CLAUSE__LOOPTEMP_:
/* Fields for first two _looptemp_ clauses are initialized by
GOMP_taskloop*, the rest are handled like firstprivate. */
@@ -7162,6 +8623,7 @@ create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
break;
}
/* FALLTHRU */
+ case OMP_CLAUSE__REDUCTEMP_:
case OMP_CLAUSE_FIRSTPRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (is_variable_sized (decl))
@@ -7187,7 +8649,7 @@ create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
src = decl;
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
- if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__LOOPTEMP_)
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE)
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
else
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
@@ -7279,7 +8741,7 @@ lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
{
tree c, clauses;
gimple *g;
- size_t n_in = 0, n_out = 0, idx = 2, i;
+ size_t cnt[4] = { 0, 0, 0, 0 }, idx = 2, i;
clauses = omp_find_clause (*pclauses, OMP_CLAUSE_DEPEND);
gcc_assert (clauses);
@@ -7287,12 +8749,21 @@ lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
switch (OMP_CLAUSE_DEPEND_KIND (c))
{
+ case OMP_CLAUSE_DEPEND_LAST:
+ /* Lowering already done at gimplification. */
+ return;
case OMP_CLAUSE_DEPEND_IN:
- n_in++;
+ cnt[2]++;
break;
case OMP_CLAUSE_DEPEND_OUT:
case OMP_CLAUSE_DEPEND_INOUT:
- n_out++;
+ cnt[0]++;
+ break;
+ case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
+ cnt[1]++;
+ break;
+ case OMP_CLAUSE_DEPEND_DEPOBJ:
+ cnt[3]++;
break;
case OMP_CLAUSE_DEPEND_SOURCE:
case OMP_CLAUSE_DEPEND_SINK:
@@ -7300,25 +8771,61 @@ lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
default:
gcc_unreachable ();
}
- tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
+ if (cnt[1] || cnt[3])
+ idx = 5;
+ size_t total = cnt[0] + cnt[1] + cnt[2] + cnt[3];
+ tree type = build_array_type_nelts (ptr_type_node, total + idx);
tree array = create_tmp_var (type);
TREE_ADDRESSABLE (array) = 1;
tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
NULL_TREE);
- g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
- gimple_seq_add_stmt (iseq, g);
- r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
- NULL_TREE);
- g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
+ if (idx == 5)
+ {
+ g = gimple_build_assign (r, build_int_cst (ptr_type_node, 0));
+ gimple_seq_add_stmt (iseq, g);
+ r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
+ NULL_TREE);
+ }
+ g = gimple_build_assign (r, build_int_cst (ptr_type_node, total));
gimple_seq_add_stmt (iseq, g);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < (idx == 5 ? 3 : 1); i++)
+ {
+ r = build4 (ARRAY_REF, ptr_type_node, array,
+ size_int (i + 1 + (idx == 5)), NULL_TREE, NULL_TREE);
+ g = gimple_build_assign (r, build_int_cst (ptr_type_node, cnt[i]));
+ gimple_seq_add_stmt (iseq, g);
+ }
+ for (i = 0; i < 4; i++)
{
- if ((i ? n_in : n_out) == 0)
+ if (cnt[i] == 0)
continue;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
- if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
- && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
+ continue;
+ else
{
+ switch (OMP_CLAUSE_DEPEND_KIND (c))
+ {
+ case OMP_CLAUSE_DEPEND_IN:
+ if (i != 2)
+ continue;
+ break;
+ case OMP_CLAUSE_DEPEND_OUT:
+ case OMP_CLAUSE_DEPEND_INOUT:
+ if (i != 0)
+ continue;
+ break;
+ case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
+ if (i != 1)
+ continue;
+ break;
+ case OMP_CLAUSE_DEPEND_DEPOBJ:
+ if (i != 3)
+ continue;
+ break;
+ default:
+ gcc_unreachable ();
+ }
tree t = OMP_CLAUSE_DECL (c);
t = fold_convert (ptr_type_node, t);
gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
@@ -7329,6 +8836,7 @@ lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
}
}
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
+ OMP_CLAUSE_DEPEND_KIND (c) = OMP_CLAUSE_DEPEND_LAST;
OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
OMP_CLAUSE_CHAIN (c) = *pclauses;
*pclauses = c;
@@ -7348,13 +8856,22 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
tree child_fn, t;
gimple *stmt = gsi_stmt (*gsi_p);
gbind *par_bind, *bind, *dep_bind = NULL;
- gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
+ gimple_seq par_body;
location_t loc = gimple_location (stmt);
clauses = gimple_omp_taskreg_clauses (stmt);
- par_bind
- = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
- par_body = gimple_bind_body (par_bind);
+ if (gimple_code (stmt) == GIMPLE_OMP_TASK
+ && gimple_omp_task_taskwait_p (stmt))
+ {
+ par_bind = NULL;
+ par_body = NULL;
+ }
+ else
+ {
+ par_bind
+ = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
+ par_body = gimple_bind_body (par_bind);
+ }
child_fn = ctx->cb.dst_fn;
if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
&& !gimple_omp_parallel_combined_p (stmt))
@@ -7380,14 +8897,49 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
&dep_ilist, &dep_olist);
}
+ if (gimple_code (stmt) == GIMPLE_OMP_TASK
+ && gimple_omp_task_taskwait_p (stmt))
+ {
+ if (dep_bind)
+ {
+ gsi_replace (gsi_p, dep_bind, true);
+ gimple_bind_add_seq (dep_bind, dep_ilist);
+ gimple_bind_add_stmt (dep_bind, stmt);
+ gimple_bind_add_seq (dep_bind, dep_olist);
+ pop_gimplify_context (dep_bind);
+ }
+ return;
+ }
+
if (ctx->srecord_type)
create_task_copyfn (as_a <gomp_task *> (stmt), ctx);
+ gimple_seq tskred_ilist = NULL;
+ gimple_seq tskred_olist = NULL;
+ if ((is_task_ctx (ctx)
+ && gimple_omp_task_taskloop_p (ctx->stmt)
+ && omp_find_clause (gimple_omp_task_clauses (ctx->stmt),
+ OMP_CLAUSE_REDUCTION))
+ || (is_parallel_ctx (ctx)
+ && omp_find_clause (gimple_omp_parallel_clauses (stmt),
+ OMP_CLAUSE__REDUCTEMP_)))
+ {
+ if (dep_bind == NULL)
+ {
+ push_gimplify_context ();
+ dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
+ }
+ lower_omp_task_reductions (ctx, is_task_ctx (ctx) ? OMP_TASKLOOP
+ : OMP_PARALLEL,
+ gimple_omp_taskreg_clauses (ctx->stmt),
+ &tskred_ilist, &tskred_olist);
+ }
+
push_gimplify_context ();
- par_olist = NULL;
- par_ilist = NULL;
- par_rlist = NULL;
+ gimple_seq par_olist = NULL;
+ gimple_seq par_ilist = NULL;
+ gimple_seq par_rlist = NULL;
bool phony_construct = gimple_code (stmt) == GIMPLE_OMP_PARALLEL
&& gimple_omp_parallel_grid_phony (as_a <gomp_parallel *> (stmt));
if (phony_construct && ctx->record_type)
@@ -7417,8 +8969,8 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
}
- olist = NULL;
- ilist = NULL;
+ gimple_seq olist = NULL;
+ gimple_seq ilist = NULL;
lower_send_clauses (clauses, &ilist, &olist, ctx);
lower_send_shared_vars (&ilist, &olist, ctx);
@@ -7433,7 +8985,7 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
- new_body = NULL;
+ gimple_seq new_body = NULL;
if (ctx->record_type)
{
@@ -7461,7 +9013,10 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_omp_set_body (stmt, new_body);
}
- bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
+ if (dep_bind && gimple_bind_block (par_bind) == NULL_TREE)
+ bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
+ else
+ bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
gimple_bind_add_seq (bind, ilist);
if (!phony_construct)
@@ -7475,7 +9030,9 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
if (dep_bind)
{
gimple_bind_add_seq (dep_bind, dep_ilist);
+ gimple_bind_add_seq (dep_bind, tskred_ilist);
gimple_bind_add_stmt (dep_bind, bind);
+ gimple_bind_add_seq (dep_bind, tskred_olist);
gimple_bind_add_seq (dep_bind, dep_olist);
pop_gimplify_context (dep_bind);
}
@@ -8830,7 +10387,10 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
case GIMPLE_OMP_TEAMS:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
- lower_omp_teams (gsi_p, ctx);
+ if (gimple_omp_teams_host (as_a <gomp_teams *> (stmt)))
+ lower_omp_taskreg (gsi_p, ctx);
+ else
+ lower_omp_teams (gsi_p, ctx);
break;
case GIMPLE_OMP_GRID_BODY:
ctx = maybe_lookup_ctx (stmt);