diff options
author | Razya Ladelsky <razya@gcc.gnu.org> | 2007-10-29 11:05:04 +0000 |
---|---|---|
committer | Razya Ladelsky <razya@gcc.gnu.org> | 2007-10-29 11:05:04 +0000 |
commit | a509ebb5a12b96267a948e5c82ff7c4f0a4826e4 (patch) | |
tree | 6b8a35dcec87c3829ed38ac34249604c36916f9c /gcc/tree-parloops.c | |
parent | 245a5fe5c0ae5dd48f7adc806117264543f39fdd (diff) | |
download | gcc-a509ebb5a12b96267a948e5c82ff7c4f0a4826e4.zip gcc-a509ebb5a12b96267a948e5c82ff7c4f0a4826e4.tar.gz gcc-a509ebb5a12b96267a948e5c82ff7c4f0a4826e4.tar.bz2 |
2007-09-23 Razya Ladelsky
Zdenek Dvorak
OMP_ATOMIC Changes,
Reduction support for automatic parallelization.
* expr.c (expand_expr_real_1): Add cases for OMP_ATOMIC_LOAD,
OMP_ATOMIC_STORE.
* Makefile.in: Add dependencies to expr.o, tree-parloops.o, omp-low.o
* tree-pretty-print.c (dump_generic_node): Add OMP_ATOMIC_LOAD
and OMP_ATOMIC_STORE.
* tree.h (OMP_DIRECTIVE_P): Add OMP_ATOMIC_LOAD,
OMP_ATOMIC_STORE.
* gimple-low.c (lower_stmt): Same.
* gimplify.c (gimplify_expr): Same.
(gimplify_omp_atomic_fetch_op, gimplify_omp_atomic_pipeline,
gimplify_omp_atomic_mutex): Remove.
(gimplify_omp_atomic): Change it to simply gimplify the
statement instead of expanding it.
* omp-low.c: Add includes to optabs.h, cfgloop.h.
(expand_omp_atomic, expand_omp_atomic_pipeline,
goa_stabilize_expr, expand_omp_atomic_mutex,
expand_omp_atomic_fetch_op): New functions to implement
expansion of OMP_ATOMIC.
(expand_omp, build_omp_regions_1): Add support for
OMP_ATOMIC_LOAD/OMP_ATOMIC_STORE.
* tree-cfg.c (make_edges): Add cases for OMP_ATOMIC_LOAD,
OMP_ATOMIC_STORE.
* tree-gimple.c (is_gimple_stmt): Add OMP_ATOMIC_LOAD,
OMP_ATOMIC_STORE.
* tree-parloops.c: Add include of tree-vectorizer.h.
(reduction_info): New structure for reduction.
(reduction_list): New list to represent list of reductions
per loop.
(struct data_arg): New helper structure for reduction.
(reduction_info_hash, reduction_info_eq, reduction_phi,
initialize_reductions,
create_call_for_reduction, create_phi_for_local_result,
create_call_for_reduction_1, create_loads_for_reductions,
create_final_loads_for_reduction): New functions.
(loop_parallel_p): Identify reductions, add reduction_list parameter.
(separate_decls_in_loop_name): Support reduction variables.
(separate_decls_in_loop): Add reduction_list and ld_st_data arguments,
call create_loads_for_reduction for each reduction.
(canonicalize_loop_ivs): Identify reductions, add reduction_list
parameter.
(transform_to_exit_first_loop): Add reduction support, add
reduction_list parameter.
(gen_parallel_loop): Add reduction_list parameter. Add call
separate_decls_in_loop with
the new argument. Traverse reductions and call
initialize_reductions, create_call_for_reduction.
(parallelize_loops): Create and delete the reduction list.
(add_field_for_name): Change use of data parameter. Add fields for
reductions.
* tree-vectorizer.h (vect_analyze_loop_form): Add declaration.
* tree-vect-analyze.c (vect_analyze_loop_form): Export it.
* tree.def: Add definitions for OMP_ATOMIC_LOAD,
OMP_ATOMIC_STORE.
* tree-inline.c (estimate_num_insns_1): Add cases for
OMP_ATOMIC_LOAD, OMP_ATOMIC_STORE.
* tree-cfg.c (make_edges): Add OMP_ATOMIC_LOAD,
OMP_ATOMIC_STORE.
* tree-ssa-operands.c (get_addr_dereference_operands):
New function. Subroutine of get_indirect_ref_operands.
(get_indirect_ref_operands): Call get_addr_dereference_operands.
(get_expr_operands): Support OMP_ATOMIC_LOAD, OMP_ATOMIC_STORE.
From-SVN: r129716
Diffstat (limited to 'gcc/tree-parloops.c')
-rw-r--r-- | gcc/tree-parloops.c | 918 |
1 files changed, 799 insertions, 119 deletions
diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c index aca2b74..34c4639 100644 --- a/gcc/tree-parloops.c +++ b/gcc/tree-parloops.c @@ -35,6 +35,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA #include "tree-scalar-evolution.h" #include "hashtab.h" #include "langhooks.h" +#include "tree-vectorizer.h" /* This pass tries to distribute iterations of loops into several threads. The implementation is straightforward -- for each loop we test whether its @@ -61,10 +62,247 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA -- handling of common scalar dependence patterns (accumulation, ...) -- handling of non-innermost loops */ +/* + Reduction handling: + currently we use vect_is_simple_reduction() to detect reduction patterns. + The code transformation will be introduced by an example. + + source code: + +parloop +{ + int sum=1; + + for (i = 0; i < N/1000; i++) + { + x[i] = i + 3; + sum+=x[i]; + } +} + +gimple code: + +header_bb: + + # sum_24 = PHI <sum_14(3), 1(2)>; + # i_21 = PHI <i_15(3), 0(2)>; +<L0>:; + D.2191_10 = i_21 + 3; + x[i_21] = D.2191_10; + sum_14 = D.2191_10 + sum_24; + i_15 = i_21 + 1; + if (N_8 > i_15) goto <L0>; else goto <L2>; + +exit_bb: + + # sum_25 = PHI <sum_14(3)>; +<L2>:; + + +after reduction transformation (only relevant parts): + +parloop +{ + +.... + +<L16>:; + D.2241_2 = (unsigned int) N_8; + D.2242_26 = D.2241_2 - 1; + if (D.2242_26 > 399) goto <L26>; else goto <L27>; + +#two new variables are created for each reduction: +"reduction" is the variable holding the neutral element +for the particular operation, e.g. 0 for PLUS_EXPR, +1 for MULT_EXPR, etc. +"reduction_initial" is the initial value given by the user. 
+It is kept and will be used after the parallel computing +is done.# + +<L26>:; + reduction.38_42 = 0; + reduction_initial.39_43 = 1; + x.40_44 = &x; + .paral_data_store.47.D.2261 = D.2242_26; + .paral_data_store.47.reduction.38 = reduction.38_42; + .paral_data_store.47.x.40 = x.40_44; + __builtin_GOMP_parallel_start (parloop._loopfn.0, &.paral_data_store.47, 4); + parloop._loopfn.0 (&.paral_data_store.47); + __builtin_GOMP_parallel_end (); + +# collecting the result after the join of the threads is done at + create_loads_for_reductions(). + a new variable "reduction_final" is created. It calculates the + final value from the initial value and the value computed by + the threads. # + + .paral_data_load.48_49 = &.paral_data_store.47; + reduction_final.49_50 = .paral_data_load.48_49->reduction.38; + reduction_final.49_51 = reduction_initial.39_43 + reduction_final.49_50; + ivtmp.37_36 = D.2242_26; + i_37 = (int) ivtmp.37_36; + D.2191_38 = i_37 + 3; + x[i_37] = D.2191_38; + sum_40 = D.2191_38 + reduction_final.49_51; + i_41 = i_37 + 1; + goto <bb 8> (<L2>); + + # sum_25 = PHI <sum_40(4), sum_9(6)>; +<L2>:; + printf (&"sum is %d\n"[0], sum_25); + +... + +} + +parloop._loopfn.0 (.paral_data_param) +{ + ... 
+ +<L28>:; + .paral_data_param_52 = .paral_data_param_75; + .paral_data_load.48_48 = (struct .paral_data.46 *) .paral_data_param_52; + D.2289_46 = .paral_data_load.48_48->D.2261; + reduction.43_45 = .paral_data_load.48_48->reduction.38; + x.45_47 = .paral_data_load.48_48->x.40; + # SUCC: 23 [100.0%] (fallthru) + + # BLOCK 23 + # PRED: 21 [100.0%] (fallthru) +<L30>:; + D.2292_60 = __builtin_omp_get_num_threads (); + D.2293_61 = (unsigned int) D.2292_60; + D.2294_62 = __builtin_omp_get_thread_num (); + D.2295_63 = (unsigned int) D.2294_62; + D.2296_64 = D.2289_46 / D.2293_61; + D.2297_65 = D.2293_61 * D.2296_64; + D.2298_66 = D.2297_65 != D.2289_46; + D.2299_67 = D.2296_64 + D.2298_66; + D.2300_68 = D.2299_67 * D.2295_63; + D.2301_69 = D.2299_67 + D.2300_68; + D.2302_70 = MIN_EXPR <D.2301_69, D.2289_46>; + ivtmp.41_54 = D.2300_68; + if (D.2300_68 >= D.2302_70) goto <L31>; else goto <L32>; + # SUCC: 26 [100.0%] (false) 24 (true) + + # BLOCK 26 + # PRED: 23 [100.0%] (false) +<L32>:; + # SUCC: 4 [100.0%] (fallthru) + + # BLOCK 4 + # PRED: 5 [100.0%] (true) 26 [100.0%] (fallthru) + # ivtmp.41_31 = PHI <ivtmp.41_30(5), ivtmp.41_54(26)>; + # sum.42_32 = PHI <sum.42_14(5), reduction.43_45(26)>; +<L0>:; + # SUCC: 19 [100.0%] (fallthru) + + # BLOCK 19 + # PRED: 4 [100.0%] (fallthru) + # sum.42_24 = PHI <sum.42_32(4)>; + # ivtmp.41_17 = PHI <ivtmp.41_31(4)>; + i.44_21 = (int) ivtmp.41_17; + D.2310_10 = i.44_21 + 3; + (*x.45_47)[i.44_21] = D.2310_10; + sum.42_14 = D.2310_10 + sum.42_24; + i.44_15 = i.44_21 + 1; + # SUCC: 5 [100.0%] (fallthru) + + # BLOCK 5 + # PRED: 19 [100.0%] (fallthru) +<L17>:; + ivtmp.41_30 = ivtmp.41_31 + 1; + if (ivtmp.41_30 < D.2302_70) goto <L0>; else goto <L31>; + # SUCC: 4 [100.0%] (true) 24 (false) + + # Adding this reduction phi is done at + create_phi_for_local_result() # + + # BLOCK 24 + # PRED: 5 (false) 23 (true) + # reduction.38_56 = PHI <sum.42_14(5), 0(23)>; + <L31>:; + __builtin_GOMP_barrier (); + # SUCC: 25 [100.0%] (fallthru) + + # 
Creating the atomic operation is + done at create_call_for_reduction_1() # + + # BLOCK 25 + # PRED: 24 [100.0%] (fallthru) + D.2306_57 = &.paral_data_load.48_48->reduction.38; + D.2307_58 = (unsigned int) reduction.38_56; + D.2308_59 = __sync_fetch_and_add_4 (D.2306_57, D.2307_58); + # SUCC: 22 [100.0%] (fallthru) + + # BLOCK 22 + # PRED: 25 [100.0%] (fallthru) + <L29>:; + return; + # SUCC: EXIT + +} + +*/ + /* Minimal number of iterations of a loop that should be executed in each thread. */ #define MIN_PER_THREAD 100 +/* Element of the hashtable, representing a + reduction in the current loop. */ +struct reduction_info +{ + tree reduc_stmt; /* reduction statement. */ + tree reduc_phi; /* The phi node defining the reduction. */ + enum tree_code reduction_code; /* code for the reduction operation. */ + tree keep_res; /* The PHI_RESULT of this phi is the resulting value + of the reduction variable when existing the loop. */ + tree initial_value; /* An ssa name representing a new variable holding + the initial value of the reduction var before entering the loop. */ + tree field; /* the name of the field in the parloop data structure intended for reduction. */ + tree reduction_init; /* An ssa name representing a new variable which will be + assigned the proper reduction initialization value (init). */ + tree init; /* reduction initialization value. */ + tree new_phi; /* (helper field) Newly created phi node whose result + will be passed to the atomic operation. Represents + the local result each thread computed for the reduction + operation. */ +}; + +/* Equality and hash functions for hashtab code. 
*/ + +static int +reduction_info_eq (const void *aa, const void *bb) +{ + const struct reduction_info *a = (const struct reduction_info *) aa; + const struct reduction_info *b = (const struct reduction_info *) bb; + + return (a->reduc_phi == b->reduc_phi); +} + +static hashval_t +reduction_info_hash (const void *aa) +{ + const struct reduction_info *a = (const struct reduction_info *) aa; + + return htab_hash_pointer (a->reduc_phi); +} + +static struct reduction_info * +reduction_phi (htab_t reduction_list, tree phi) +{ + struct reduction_info tmpred, *red; + + if (htab_elements (reduction_list) == 0) + return NULL; + + tmpred.reduc_phi = phi; + red = htab_find (reduction_list, &tmpred); + + return red; +} + /* Element of hashtable of names to copy. */ struct name_to_copy_elt @@ -80,8 +318,8 @@ struct name_to_copy_elt static int name_to_copy_elt_eq (const void *aa, const void *bb) { - struct name_to_copy_elt *a = (struct name_to_copy_elt *) aa; - struct name_to_copy_elt *b = (struct name_to_copy_elt *) bb; + const struct name_to_copy_elt *a = (const struct name_to_copy_elt *) aa; + const struct name_to_copy_elt *b = (const struct name_to_copy_elt *) bb; return a->version == b->version; } @@ -89,7 +327,7 @@ name_to_copy_elt_eq (const void *aa, const void *bb) static hashval_t name_to_copy_elt_hash (const void *aa) { - struct name_to_copy_elt *a = (struct name_to_copy_elt *) aa; + const struct name_to_copy_elt *a = (const struct name_to_copy_elt *) aa; return (hashval_t) a->version; } @@ -97,17 +335,19 @@ name_to_copy_elt_hash (const void *aa) /* Returns true if the iterations of LOOP are independent on each other (that is, if we can execute them in parallel), and if LOOP satisfies other conditions that we need to be able to parallelize it. Description of number - of iterations is stored to NITER. */ + of iterations is stored to NITER. Reduction analysis is done, if + reductions are found, they are inserted to the REDUCTION_LIST. 
*/ static bool -loop_parallel_p (struct loop *loop, struct tree_niter_desc *niter) +loop_parallel_p (struct loop *loop, htab_t reduction_list, struct tree_niter_desc *niter) { edge exit = single_dom_exit (loop); - VEC (ddr_p, heap) *dependence_relations; - VEC (data_reference_p, heap) *datarefs; + VEC (ddr_p, heap) * dependence_relations; + VEC (data_reference_p, heap) * datarefs; lambda_trans_matrix trans; bool ret = false; tree phi; + loop_vec_info simple_loop_info; /* Only consider innermost loops with just one exit. The innermost-loop restriction is not necessary, but it makes things simpler. */ @@ -127,15 +367,99 @@ loop_parallel_p (struct loop *loop, struct tree_niter_desc *niter) return false; } + simple_loop_info = vect_analyze_loop_form (loop); + + for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi)) + { + tree reduc_stmt = NULL, operation; + + /* ??? TODO: Change this into a generic function that + recognizes reductions. */ + if (!is_gimple_reg (PHI_RESULT (phi))) + continue; + if (simple_loop_info) + reduc_stmt = vect_is_simple_reduction (simple_loop_info, phi); + + /* Create a reduction_info struct, initialize it and insert it to + the reduction list. */ + + if (reduc_stmt) + { + PTR *slot; + struct reduction_info *new_reduction; + + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, + "Detected reduction. 
reduction stmt is: \n"); + print_generic_stmt (dump_file, reduc_stmt, 0); + fprintf (dump_file, "\n"); + } + + new_reduction = XCNEW (struct reduction_info); + + new_reduction->reduc_stmt = reduc_stmt; + new_reduction->reduc_phi = phi; + operation = GIMPLE_STMT_OPERAND (reduc_stmt, 1); + new_reduction->reduction_code = TREE_CODE (operation); + slot = htab_find_slot (reduction_list, new_reduction, INSERT); + *slot = new_reduction; + } + } + for (phi = phi_nodes (exit->dest); phi; phi = PHI_CHAIN (phi)) { + struct reduction_info *red; + imm_use_iterator imm_iter; + use_operand_p use_p; + tree reduc_phi; + tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit); if (is_gimple_reg (val)) { if (dump_file && (dump_flags & TDF_DETAILS)) - fprintf (dump_file, " FAILED: value used outside loop\n"); - return false; + { + fprintf (dump_file, "phi is "); + print_generic_expr (dump_file, phi, 0); + fprintf (dump_file, "arg of phi to exit: value "); + print_generic_expr (dump_file, val, 0); + fprintf (dump_file, " used outside loop\n"); + fprintf (dump_file, + " checking if it a part of reduction pattern: \n"); + } + if (htab_elements (reduction_list) == 0) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, + " FAILED: it is not a part of reduction.\n"); + return false; + } + reduc_phi = NULL; + FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val) + { + if (flow_bb_inside_loop_p (loop, bb_for_stmt (USE_STMT (use_p)))) + { + reduc_phi = USE_STMT (use_p); + break; + } + } + red = reduction_phi (reduction_list, reduc_phi); + if (red == NULL) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, + " FAILED: it is not a part of reduction.\n"); + return false; + } + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "reduction phi is "); + print_generic_expr (dump_file, red->reduc_phi, 0); + fprintf (dump_file, "reduction stmt is "); + print_generic_expr (dump_file, red->reduc_stmt, 0); + } + } } @@ -146,13 +470,18 @@ loop_parallel_p (struct 
loop *loop, struct tree_niter_desc *niter) tree def = PHI_RESULT (phi); affine_iv iv; - if (is_gimple_reg (def) - && !simple_iv (loop, phi, def, &iv, true)) + if (is_gimple_reg (def) && !simple_iv (loop, phi, def, &iv, true)) { - if (dump_file && (dump_flags & TDF_DETAILS)) - fprintf (dump_file, - " FAILED: scalar dependency between iterations\n"); - return false; + struct reduction_info *red; + + red = reduction_phi (reduction_list, phi); + if (red == NULL) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, + " FAILED: scalar dependency between iterations\n"); + return false; + } } } @@ -183,7 +512,8 @@ loop_parallel_p (struct loop *loop, struct tree_niter_desc *niter) fprintf (dump_file, " SUCCESS: may be parallelized\n"); } else if (dump_file && (dump_flags & TDF_DETAILS)) - fprintf (dump_file, " FAILED: data dependencies exist across iterations\n"); + fprintf (dump_file, + " FAILED: data dependencies exist across iterations\n"); free_dependence_relations (dependence_relations); free_data_refs (datarefs); @@ -211,8 +541,9 @@ take_address_of (tree var, tree type, struct loop *loop, htab_t decl_address) bvar = create_tmp_var (type, get_name (var)); add_referenced_var (bvar); stmt = build_gimple_modify_stmt (bvar, - fold_convert (type, - build_addr (var, current_function_decl))); + fold_convert (type, + build_addr (var, + current_function_decl))); name = make_ssa_name (bvar, stmt); GIMPLE_STMT_OPERAND (stmt, 0) = name; bsi_insert_on_edge_immediate (entry, stmt); @@ -230,8 +561,7 @@ take_address_of (tree var, tree type, struct loop *loop, htab_t decl_address) return name; bvar = SSA_NAME_VAR (name); - stmt = build_gimple_modify_stmt (bvar, - fold_convert (type, name)); + stmt = build_gimple_modify_stmt (bvar, fold_convert (type, name)); name = make_ssa_name (bvar, stmt); GIMPLE_STMT_OPERAND (stmt, 0) = name; bsi_insert_on_edge_immediate (entry, stmt); @@ -239,10 +569,72 @@ take_address_of (tree var, tree type, struct loop *loop, htab_t 
decl_address) return name; } -/* Eliminates references to local variables in *TP out of LOOP. DECL_ADDRESS - contains addresses of the references that had their address taken already. - If the expression is changed, CHANGED is set to true. Callback for - walk_tree. */ +/* Callback for htab_traverse. Create the initialization statement + for reduction described in SLOT, and place it at the preheader of + the loop described in DATA. */ + +static int +initialize_reductions (void **slot, void *data) +{ + tree t, stmt; + tree init, c; + tree name, name1; + tree bvar, type, arg; + edge e; + + struct reduction_info *reduc = *slot; + struct loop *loop = (struct loop *) data; + + /* Create initialization in preheader: + reduction_variable = initialization value of reduction. */ + + /* In the phi node at the header, replace the argument coming + from the preheader with the reduction initialization value. */ + + /* Create a new variable to initialize the reduction. */ + type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi)); + bvar = create_tmp_var (type, "reduction"); + add_referenced_var (bvar); + + c = build_omp_clause (OMP_CLAUSE_REDUCTION); + OMP_CLAUSE_REDUCTION_CODE (c) = reduc->reduction_code; + OMP_CLAUSE_DECL (c) = + SSA_NAME_VAR (GIMPLE_STMT_OPERAND (reduc->reduc_stmt, 0)); + + init = omp_reduction_init (c, TREE_TYPE (bvar)); + reduc->init = init; + + t = build_gimple_modify_stmt (bvar, init); + name = make_ssa_name (bvar, t); + + GIMPLE_STMT_OPERAND (t, 0) = name; + SSA_NAME_DEF_STMT (name) = t; + + /* Replace the argument + representing the initialization value. Keeping the old value + in a new variable "reduction_initial", that will be taken in + consideration after the parallel computing is done. */ + + e = loop_preheader_edge (loop); + arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e); + /* Create new variable to hold the initial value. 
*/ + type = TREE_TYPE (bvar); + bvar = create_tmp_var (type, "reduction_initial"); + add_referenced_var (bvar); + + stmt = build_gimple_modify_stmt (bvar, arg); + name1 = make_ssa_name (bvar, stmt); + GIMPLE_STMT_OPERAND (stmt, 0) = name1; + SSA_NAME_DEF_STMT (name1) = stmt; + + bsi_insert_on_edge_immediate (e, stmt); + bsi_insert_on_edge_immediate (e, t); + SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE + (reduc->reduc_phi, loop_preheader_edge (loop)), name); + reduc->initial_value = name1; + reduc->reduction_init = name; + return 1; +} struct elv_data { @@ -251,8 +643,13 @@ struct elv_data bool changed; }; +/* Eliminates references to local variables in *TP out of LOOP. DECL_ADDRESS + contains addresses of the references that had their address taken already. + If the expression is changed, CHANGED is set to true. Callback for + walk_tree. */ + static tree -eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data) +eliminate_local_variables_1 (tree * tp, int *walk_subtrees, void *data) { struct elv_data *dta = data; tree t = *tp, var, addr, addr_type, type; @@ -291,8 +688,7 @@ eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data) return NULL_TREE; } - if (!EXPR_P (t) - && !GIMPLE_STMT_P (t)) + if (!EXPR_P (t) && !GIMPLE_STMT_P (t)) *walk_subtrees = 0; return NULL_TREE; @@ -318,12 +714,13 @@ eliminate_local_variables_stmt (struct loop *loop, tree stmt, update_stmt (stmt); } -/* Eliminates the references to local variables from LOOP. This includes: - - 1) Taking address of a local variable -- these are moved out of the loop - (and temporary variable is created to hold the address if necessary). +/* Eliminates the references to local variables from LOOP. + This includes: + 1) Taking address of a local variable -- these are moved out of the + loop (and temporary variable is created to hold the address if + necessary). 2) Dereferencing a local variable -- these are replaced with indirect - references. */ + references. 
*/ static void eliminate_local_variables (struct loop *loop) @@ -388,7 +785,7 @@ separate_decls_in_loop_name (tree name, *dslot = nielt; /* Ensure that when we meet this decl next time, we won't duplicate - it again. */ + it again. */ nuid = DECL_UID (var_copy); ielt.uid = nuid; dslot = htab_find_slot_with_hash (decl_copies, &ielt, nuid, INSERT); @@ -439,47 +836,114 @@ separate_decls_in_loop_stmt (struct loop *loop, tree stmt, mark_virtual_ops_for_renaming (stmt); FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF) - { - name = DEF_FROM_PTR (def); - gcc_assert (TREE_CODE (name) == SSA_NAME); - copy = separate_decls_in_loop_name (name, name_copies, decl_copies, - false); - gcc_assert (copy == name); - } + { + name = DEF_FROM_PTR (def); + gcc_assert (TREE_CODE (name) == SSA_NAME); + copy = separate_decls_in_loop_name (name, name_copies, decl_copies, + false); + gcc_assert (copy == name); + } FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE) - { - name = USE_FROM_PTR (use); - if (TREE_CODE (name) != SSA_NAME) - continue; - - copy_name_p = expr_invariant_in_loop_p (loop, name); - copy = separate_decls_in_loop_name (name, name_copies, decl_copies, - copy_name_p); - SET_USE (use, copy); - } + { + name = USE_FROM_PTR (use); + if (TREE_CODE (name) != SSA_NAME) + continue; + + copy_name_p = expr_invariant_in_loop_p (loop, name); + copy = separate_decls_in_loop_name (name, name_copies, decl_copies, + copy_name_p); + SET_USE (use, copy); + } } +/* A helper structure for passing the TYPE and REDUCTION_LIST + to the DATA parameter of add_field_for_name. */ +struct data_arg +{ + tree type; + htab_t reduction_list; +}; + /* Callback for htab_traverse. Adds a field corresponding to a ssa name - described in SLOT to the type passed in DATA. */ + described in SLOT. The type is passed in DATA. The Reduction list + is also passes in DATA. 
*/ static int add_field_for_name (void **slot, void *data) { + tree stmt; + use_operand_p use_p = NULL; + struct name_to_copy_elt *elt = *slot; - tree type = data; + struct data_arg *data_arg = (struct data_arg *) data; + tree type = data_arg->type; tree name = ssa_name (elt->version); tree var = SSA_NAME_VAR (name); tree field = build_decl (FIELD_DECL, DECL_NAME (var), TREE_TYPE (var)); insert_field_into_struct (type, field); elt->field = field; + + /* Find uses of name to determine if this name is related to + a reduction phi, and if so, record the field in the reduction struct. */ + + if ((htab_elements (data_arg->reduction_list) > 0) + && single_imm_use (elt->new_name, &use_p, &stmt) + && TREE_CODE (stmt) == PHI_NODE) + { + /* check if STMT is a REDUC_PHI of some reduction. */ + struct reduction_info *red; + + red = reduction_phi (data_arg->reduction_list ,stmt); + if (red) + red->field = field; + } + return 1; } -/* Callback for htab_traverse. Creates loads to a field of LOAD in LOAD_BB and - store to a field of STORE in STORE_BB for the ssa name and its duplicate - specified in SLOT. */ +/* Callback for htab_traverse. A local result is the intermediate result + computed by a single + thread, or the intial value in case no iteration was executed. + This function creates a phi node reflecting these values. + The phi's result will be stored in NEW_PHI field of the + reduction's data structure. */ + +static int +create_phi_for_local_result (void **slot, void *data) +{ + struct reduction_info *reduc = *slot; + struct loop *loop = data; + edge e; + tree new_phi; + basic_block store_bb; + tree local_res; + + /* STORE_BB is the block where the phi + should be stored. It is the destination of the loop exit. + (Find the fallthru edge from OMP_CONTINUE). */ + store_bb = FALLTHRU_EDGE (loop->latch)->dest; + + /* STORE_BB has two predecessors. 
One coming from the loop + (the reduction's result is computed at the loop), + and another coming from a block preceding the loop, + when no iterations + are executed (the initial value should be taken). */ + if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch)) + e = EDGE_PRED (store_bb, 1); + else + e = EDGE_PRED (store_bb, 0); + local_res = make_ssa_name (SSA_NAME_VAR (reduc->reduction_init), NULL_TREE); + new_phi = create_phi_node (local_res, store_bb); + SSA_NAME_DEF_STMT (local_res) = new_phi; + add_phi_arg (new_phi, reduc->init, e); + add_phi_arg (new_phi, GIMPLE_STMT_OPERAND (reduc->reduc_stmt, 0), + FALLTHRU_EDGE (loop->latch)); + reduc->new_phi = new_phi; + + return 1; +} struct clsn_data { @@ -490,6 +954,159 @@ struct clsn_data basic_block load_bb; }; +/* Callback for htab_traverse. Create an atomic instruction for the + reduction described in SLOT. + DATA annotates the place in memory the atomic operation relates to, + and the basic block it needs to be generated in. */ + +static int +create_call_for_reduction_1 (void **slot, void *data) +{ + struct reduction_info *reduc = *slot; + struct clsn_data *clsn_data = data; + block_stmt_iterator bsi; + tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi)); + tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load)); + tree load_struct; + basic_block bb; + basic_block new_bb; + edge e; + tree t, addr, addr_type, ref, x; + tree tmp_load, load, name; + + load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load); + t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE); + addr_type = build_pointer_type (type); + + addr = build_addr (t, current_function_decl); + + /* Create phi node. 
*/ + bb = clsn_data->load_bb; + + e = split_block (bb, t); + new_bb = e->dest; + + tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr)), NULL); + add_referenced_var (tmp_load); + tmp_load = make_ssa_name (tmp_load, NULL); + load = build2 (OMP_ATOMIC_LOAD, void_type_node, tmp_load, addr); + SSA_NAME_DEF_STMT (tmp_load) = load; + bsi = bsi_start (new_bb); + bsi_insert_after (&bsi, load, BSI_NEW_STMT); + + e = split_block (new_bb, load); + new_bb = e->dest; + bsi = bsi_start (new_bb); + ref = tmp_load; + x = + fold_build2 (reduc->reduction_code, + TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref, + PHI_RESULT (reduc->new_phi)); + + name = + force_gimple_operand_bsi (&bsi, x, true, NULL_TREE, true, + BSI_CONTINUE_LINKING); + + x = build1 (OMP_ATOMIC_STORE, void_type_node, name); + + bsi_insert_after (&bsi, x, BSI_NEW_STMT); + return 1; +} + +/* Create the atomic operation at the join point of the threads. + REDUCTION_LIST describes the reductions in the LOOP. + LD_ST_DATA describes the shared data structure where + shared data is stored in and loaded from. */ +static void +create_call_for_reduction (struct loop *loop, htab_t reduction_list, + struct clsn_data *ld_st_data) +{ + htab_traverse (reduction_list, create_phi_for_local_result, loop); + /* Find the fallthru edge from OMP_CONTINUE. */ + ld_st_data->load_bb = FALLTHRU_EDGE (loop->latch)->dest; + htab_traverse (reduction_list, create_call_for_reduction_1, ld_st_data); +} + +/* Callback for htab_traverse. Create a new variable that loads the + final reduction value at the + join point of all threads, adds the initial value the reduction + variable had before the parallel computation started, and + inserts it in the right place. 
*/ + +static int +create_loads_for_reductions (void **slot, void *data) +{ + struct reduction_info *red = *slot; + struct clsn_data *clsn_data = data; + tree stmt; + block_stmt_iterator bsi; + tree type = TREE_TYPE (red->reduction_init); + tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load)); + tree load_struct; + tree bvar, name; + tree x; + + bsi = bsi_after_labels (clsn_data->load_bb); + load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load); + load_struct = build3 (COMPONENT_REF, type, load_struct, red->field, + NULL_TREE); + bvar = create_tmp_var (type, "reduction_final"); + add_referenced_var (bvar); + + /* Apply operation between the new variable which is the result + of computation all threads, and the initial value which is kept + at reduction->inital_value. */ + + stmt = build_gimple_modify_stmt (bvar, load_struct); + name = make_ssa_name (bvar, stmt); + GIMPLE_STMT_OPERAND (stmt, 0) = name; + SSA_NAME_DEF_STMT (name) = stmt; + + bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); + + x = + fold_build2 (red->reduction_code, TREE_TYPE (load_struct), + name, red->initial_value); + name = PHI_RESULT (red->keep_res); + stmt = build_gimple_modify_stmt (name, x); + GIMPLE_STMT_OPERAND (stmt, 0) = name; + SSA_NAME_DEF_STMT (name) = stmt; + + bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); + + remove_phi_node (red->keep_res, NULL_TREE, false); + + return 1; +} + +/* Load the reduction result that was stored in LD_ST_DATA. + REDUCTION_LIST describes the list of reductions that the + loades should be generated for. 
*/ +static void +create_final_loads_for_reduction (htab_t reduction_list, + struct clsn_data *ld_st_data) +{ + block_stmt_iterator bsi; + tree t; + + bsi = bsi_after_labels (ld_st_data->load_bb); + t = build_fold_addr_expr (ld_st_data->store); + t = + build_gimple_modify_stmt (ld_st_data->load, + build_fold_addr_expr (ld_st_data->store)); + + bsi_insert_before (&bsi, t, BSI_NEW_STMT); + SSA_NAME_DEF_STMT (ld_st_data->load) = t; + GIMPLE_STMT_OPERAND (t, 0) = ld_st_data->load; + + htab_traverse (reduction_list, create_loads_for_reductions, ld_st_data); + +} + +/* Callback for htab_traverse. Creates loads to a field of LOAD in LOAD_BB and + store to a field of STORE in STORE_BB for the ssa name and its duplicate + specified in SLOT. */ + static int create_loads_and_stores_for_name (void **slot, void *data) { @@ -502,19 +1119,19 @@ create_loads_and_stores_for_name (void **slot, void *data) tree load_struct; bsi = bsi_last (clsn_data->store_bb); - stmt = build_gimple_modify_stmt ( - build3 (COMPONENT_REF, type, clsn_data->store, elt->field, - NULL_TREE), - ssa_name (elt->version)); + stmt = + build_gimple_modify_stmt (build3 + (COMPONENT_REF, type, clsn_data->store, + elt->field, NULL_TREE), + ssa_name (elt->version)); mark_virtual_ops_for_renaming (stmt); bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); bsi = bsi_last (clsn_data->load_bb); load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load); - stmt = build_gimple_modify_stmt ( - elt->new_name, - build3 (COMPONENT_REF, type, load_struct, elt->field, - NULL_TREE)); + stmt = build_gimple_modify_stmt (elt->new_name, + build3 (COMPONENT_REF, type, load_struct, + elt->field, NULL_TREE)); SSA_NAME_DEF_STMT (elt->new_name) = stmt; bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); @@ -549,11 +1166,17 @@ create_loads_and_stores_for_name (void **slot, void *data) `old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT. 
The pointer `new' is intentionally not initialized (the loop will be split to a separate function later, and `new' will be initialized from its arguments). - */ + LD_ST_DATA holds information about the shared data structure used to pass + information among the threads. It is initialized here, and + gen_parallel_loop will pass it to create_call_for_reduction that + needs this information. REDUCTION_LIST describes the reductions + in LOOP. */ static void -separate_decls_in_loop (struct loop *loop, tree *arg_struct, - tree *new_arg_struct) +separate_decls_in_loop (struct loop *loop, htab_t reduction_list, + tree * arg_struct, tree * new_arg_struct, + struct clsn_data *ld_st_data) + { basic_block bb1 = split_edge (loop_preheader_edge (loop)); basic_block bb0 = single_pred (bb1); @@ -584,19 +1207,23 @@ separate_decls_in_loop (struct loop *loop, tree *arg_struct, if (htab_elements (name_copies) == 0) { /* It may happen that there is nothing to copy (if there are only - loop carried and external variables in the loop). */ + loop carried and external variables in the loop). */ *arg_struct = NULL; *new_arg_struct = NULL; } else { + struct data_arg data_arg; + /* Create the type for the structure to store the ssa names to. */ type = lang_hooks.types.make_type (RECORD_TYPE); type_name = build_decl (TYPE_DECL, create_tmp_var_name (".paral_data"), type); TYPE_NAME (type) = type_name; - htab_traverse (name_copies, add_field_for_name, type); + data_arg.type = type; + data_arg.reduction_list = reduction_list; + htab_traverse (name_copies, add_field_for_name, &data_arg); layout_type (type); /* Create the loads and stores. 
*/ @@ -606,12 +1233,22 @@ separate_decls_in_loop (struct loop *loop, tree *arg_struct, add_referenced_var (nvar); *new_arg_struct = make_ssa_name (nvar, NULL_TREE); - clsn_data.store = *arg_struct; - clsn_data.load = *new_arg_struct; - clsn_data.store_bb = bb0; - clsn_data.load_bb = bb1; + ld_st_data->store = *arg_struct; + ld_st_data->load = *new_arg_struct; + ld_st_data->store_bb = bb0; + ld_st_data->load_bb = bb1; htab_traverse (name_copies, create_loads_and_stores_for_name, - &clsn_data); + ld_st_data); + + /* Load the calculation from memory into a new + reduction variable (after the join of the threads). */ + if (htab_elements (reduction_list) > 0) + { + clsn_data.load = make_ssa_name (nvar, NULL_TREE); + clsn_data.load_bb = single_dom_exit (loop)->dest; + clsn_data.store = ld_st_data->store; + create_final_loads_for_reduction (reduction_list, &clsn_data); + } } htab_delete (decl_copies); @@ -692,10 +1329,11 @@ create_loop_fn (void) /* Bases all the induction variables in LOOP on a single induction variable (unsigned with base 0 and step 1), whose final value is compared with - NIT. The induction variable is incremented in the loop latch. */ + NIT. The induction variable is incremented in the loop latch. + REDUCTION_LIST describes the reductions in LOOP. 
*/ static void -canonicalize_loop_ivs (struct loop *loop, tree nit) +canonicalize_loop_ivs (struct loop *loop, htab_t reduction_list, tree nit) { unsigned precision = TYPE_PRECISION (TREE_TYPE (nit)); tree phi, prev, res, type, var_before, val, atype, t, next; @@ -703,13 +1341,13 @@ canonicalize_loop_ivs (struct loop *loop, tree nit) bool ok; affine_iv iv; edge exit = single_dom_exit (loop); + struct reduction_info *red; for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi)) { res = PHI_RESULT (phi); - if (is_gimple_reg (res) - && TYPE_PRECISION (TREE_TYPE (res)) > precision) + if (is_gimple_reg (res) && TYPE_PRECISION (TREE_TYPE (res)) > precision) precision = TYPE_PRECISION (TREE_TYPE (res)); } @@ -726,16 +1364,22 @@ canonicalize_loop_ivs (struct loop *loop, tree nit) next = PHI_CHAIN (phi); res = PHI_RESULT (phi); - if (!is_gimple_reg (res) - || res == var_before) + if (!is_gimple_reg (res) || res == var_before) { prev = phi; continue; } - - ok = simple_iv (loop, phi, res, &iv, true); - gcc_assert (ok); + ok = simple_iv (loop, phi, res, &iv, true); + red = reduction_phi (reduction_list, phi); + /* We preserve the reduction phi nodes. */ + if (!ok && red) + { + prev = phi; + continue; + } + else + gcc_assert (ok); remove_phi_node (phi, prev, false); atype = TREE_TYPE (res); @@ -773,10 +1417,11 @@ canonicalize_loop_ivs (struct loop *loop, tree nit) follows the loop exit. In this case, it would be better not to copy the body of the loop, but only move the entry of the loop directly before the exit check and increase the number of iterations of the loop by one. - This may need some additional preconditioning in case NIT = ~0. */ + This may need some additional preconditioning in case NIT = ~0. + REDUCTION_LIST describes the reductions in LOOP. 
*/ static void -transform_to_exit_first_loop (struct loop *loop, tree nit) +transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit) { basic_block *bbs, *nbbs, ex_bb, orig_header; unsigned n; @@ -825,8 +1470,9 @@ transform_to_exit_first_loop (struct loop *loop, tree nit) ex_bb = nbbs[0]; free (nbbs); - /* The only gimple reg that should be copied out of the loop is the - control variable. */ + /* Other than reductions, the only gimple reg that should be copied + out of the loop is the control variable. */ + control_name = NULL_TREE; for (phi = phi_nodes (ex_bb); phi; phi = PHI_CHAIN (phi)) { @@ -834,8 +1480,26 @@ transform_to_exit_first_loop (struct loop *loop, tree nit) if (!is_gimple_reg (res)) continue; - gcc_assert (control_name == NULL_TREE - && SSA_NAME_VAR (res) == SSA_NAME_VAR (control)); + /* Check if it is a part of reduction. If it is, + keep the phi at the reduction's keep_res field. The + PHI_RESULT of this phi is the resulting value of the reduction + variable when exiting the loop. 
*/ + + exit = single_dom_exit (loop); + + if (htab_elements (reduction_list) > 0) + { + struct reduction_info *red; + + tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit); + + red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val)); + if (red) + red->keep_res = phi; + } + else + gcc_assert (control_name == NULL_TREE + && SSA_NAME_VAR (res) == SSA_NAME_VAR (control)); control_name = res; } gcc_assert (control_name != NULL_TREE); @@ -872,9 +1536,8 @@ create_parallel_loop (struct loop *loop, tree loop_fn, tree data, t = build_omp_clause (OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR (t) - = build_int_cst (integer_type_node, n_threads); - t = build4 (OMP_PARALLEL, void_type_node, NULL_TREE, t, - loop_fn, data); + = build_int_cst (integer_type_node, n_threads); + t = build4 (OMP_PARALLEL, void_type_node, NULL_TREE, t, loop_fn, data); bsi_insert_after (&bsi, t, BSI_NEW_STMT); @@ -889,7 +1552,8 @@ create_parallel_loop (struct loop *loop, tree loop_fn, tree data, SSA_NAME_DEF_STMT (param) = t; t = build_gimple_modify_stmt (new_data, - fold_convert (TREE_TYPE (new_data), param)); + fold_convert (TREE_TYPE (new_data), + param)); bsi_insert_before (&bsi, t, BSI_SAME_STMT); SSA_NAME_DEF_STMT (new_data) = t; } @@ -949,11 +1613,11 @@ create_parallel_loop (struct loop *loop, tree loop_fn, tree data, OMP_FOR_CLAUSES (for_stmt) = t; OMP_FOR_INIT (for_stmt) = build_gimple_modify_stmt (initvar, cvar_init); OMP_FOR_COND (for_stmt) = cond; - OMP_FOR_INCR (for_stmt) = build_gimple_modify_stmt ( - cvar_base, - build2 (PLUS_EXPR, type, - cvar_base, - build_int_cst (type, 1))); + OMP_FOR_INCR (for_stmt) = build_gimple_modify_stmt (cvar_base, + build2 (PLUS_EXPR, type, + cvar_base, + build_int_cst + (type, 1))); OMP_FOR_BODY (for_stmt) = NULL_TREE; OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE; @@ -975,16 +1639,18 @@ create_parallel_loop (struct loop *loop, tree loop_fn, tree data, } /* Generates code to execute the iterations of LOOP in N_THREADS threads in - parallel. 
NITER describes number of iterations of LOOP. */ + parallel. NITER describes number of iterations of LOOP. + REDUCTION_LIST describes the reductions existant in the LOOP. */ static void -gen_parallel_loop (struct loop *loop, unsigned n_threads, - struct tree_niter_desc *niter) +gen_parallel_loop (struct loop *loop, htab_t reduction_list, + unsigned n_threads, struct tree_niter_desc *niter) { struct loop *nloop; tree many_iterations_cond, type, nit; tree stmts, arg_struct, new_arg_struct; basic_block parallel_head; + struct clsn_data clsn_data; unsigned prob; /* From @@ -1006,8 +1672,8 @@ gen_parallel_loop (struct loop *loop, unsigned n_threads, --------------------------------------------------------------------- if (MAY_BE_ZERO - || NITER < MIN_PER_THREAD * N_THREADS) - goto original; + || NITER < MIN_PER_THREAD * N_THREADS) + goto original; BODY1; store all local loop-invariant variables used in body of the loop to DATA. @@ -1017,8 +1683,8 @@ gen_parallel_loop (struct loop *loop, unsigned n_threads, BODY2; BODY1; OMP_CONTINUE; - OMP_RETURN -- OMP_FOR - OMP_RETURN -- OMP_PARALLEL + OMP_RETURN -- OMP_FOR + OMP_RETURN -- OMP_PARALLEL goto end; original: @@ -1039,7 +1705,7 @@ gen_parallel_loop (struct loop *loop, unsigned n_threads, number of iterations is large enough, and we will transform it into the loop that will be split to loop_fn, the new one will be used for the remaining iterations. 
*/ - + type = TREE_TYPE (niter->niter); nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true, NULL_TREE); @@ -1047,22 +1713,21 @@ gen_parallel_loop (struct loop *loop, unsigned n_threads, bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts); many_iterations_cond = - fold_build2 (GE_EXPR, boolean_type_node, - nit, build_int_cst (type, MIN_PER_THREAD * n_threads)); + fold_build2 (GE_EXPR, boolean_type_node, + nit, build_int_cst (type, MIN_PER_THREAD * n_threads)); many_iterations_cond - = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, - invert_truthvalue (unshare_expr (niter->may_be_zero)), - many_iterations_cond); + = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, + invert_truthvalue (unshare_expr (niter->may_be_zero)), + many_iterations_cond); many_iterations_cond - = force_gimple_operand (many_iterations_cond, &stmts, - false, NULL_TREE); + = force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE); if (stmts) bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts); if (!is_gimple_condexpr (many_iterations_cond)) { many_iterations_cond - = force_gimple_operand (many_iterations_cond, &stmts, - true, NULL_TREE); + = force_gimple_operand (many_iterations_cond, &stmts, + true, NULL_TREE); if (stmts) bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts); } @@ -1077,21 +1742,29 @@ gen_parallel_loop (struct loop *loop, unsigned n_threads, free_original_copy_tables (); /* Base all the induction variables in LOOP on a single control one. */ - canonicalize_loop_ivs (loop, nit); + canonicalize_loop_ivs (loop, reduction_list, nit); /* Ensure that the exit condition is the first statement in the loop. */ - transform_to_exit_first_loop (loop, nit); + transform_to_exit_first_loop (loop, reduction_list, nit); + + + /* Generate intializations for reductions. 
*/ + + if (htab_elements (reduction_list) > 0) + htab_traverse (reduction_list, initialize_reductions, loop); /* Eliminate the references to local variables from the loop. */ eliminate_local_variables (loop); /* In the old loop, move all variables non-local to the loop to a structure and back, and create separate decls for the variables used in loop. */ - separate_decls_in_loop (loop, &arg_struct, &new_arg_struct); + separate_decls_in_loop (loop, reduction_list, &arg_struct, &new_arg_struct, &clsn_data); /* Create the parallel constructs. */ parallel_head = create_parallel_loop (loop, create_loop_fn (), arg_struct, new_arg_struct, n_threads); + if (htab_elements (reduction_list) > 0) + create_call_for_reduction (loop, reduction_list, &clsn_data); scev_reset (); @@ -1103,6 +1776,7 @@ gen_parallel_loop (struct loop *loop, unsigned n_threads, a separate expand_omp pass, since it is more efficient, and less likely to cause troubles with further analyses not being able to deal with the OMP trees. */ + omp_expand_local (parallel_head); } @@ -1118,30 +1792,36 @@ parallelize_loops (void) struct loop *loop; struct tree_niter_desc niter_desc; loop_iterator li; + htab_t reduction_list; /* Do not parallelize loops in the functions created by parallelization. */ if (parallelized_function_p (cfun->decl)) return false; + reduction_list = htab_create (10, reduction_info_hash, + reduction_info_eq, free); + FOR_EACH_LOOP (li, loop, 0) { + htab_empty (reduction_list); if (/* Do not bother with loops in cold areas. */ !maybe_hot_bb_p (loop->header) /* Or loops that roll too little. */ || expected_loop_iterations (loop) <= n_threads /* And of course, the loop must be parallelizable. 
*/ || !can_duplicate_loop_p (loop) - || !loop_parallel_p (loop, &niter_desc)) + || !loop_parallel_p (loop, reduction_list, &niter_desc)) continue; changed = true; - gen_parallel_loop (loop, n_threads, &niter_desc); + gen_parallel_loop (loop, reduction_list, n_threads, &niter_desc); verify_flow_info (); verify_dominators (CDI_DOMINATORS); verify_loop_structure (); verify_loop_closed_ssa (); } + htab_delete (reduction_list); return changed; } |