author     liuhongt <hongtao.liu@intel.com>          2021-10-25 13:59:51 +0800
committer  liuhongt <hongtao.liu@intel.com>          2021-11-10 17:16:42 +0800
commit     fb161782545224f55ba26ba663889c5e6e9a04d1 (patch)
tree       0e1df016da34d448c1147993b1cf8859eb7d8fa8 /gcc/tree-ssa-ccp.c
parent     f15ad1e3f9488a31abf1c122bd186c1a3d2a5dbc (diff)
Improve integer bit test on __atomic_fetch_[or|and]_* returns
commit adedd5c173388ae505470df152b9cb3947339566
Author: Jakub Jelinek <jakub@redhat.com>
Date: Tue May 3 13:37:25 2016 +0200
re PR target/49244 (__sync or __atomic builtins will not emit 'lock bts/btr/btc')
optimized bit test on __atomic_fetch_or_* and __atomic_fetch_and_* returns
with lock bts/btr/btc by turning
mask_2 = 1 << cnt_1;
_4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
_5 = _4 & mask_2;
into
_4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
_5 = _4;
and
mask_6 = 1 << bit_5(D);
_1 = ~mask_6;
_2 = __atomic_fetch_and_4 (v_8(D), _1, 0);
_3 = _2 & mask_6;
_4 = _3 != 0;
into
mask_6 = 1 << bit_5(D);
_1 = ~mask_6;
_11 = .ATOMIC_BIT_TEST_AND_RESET (v_8(D), bit_5(D), 1, 0);
_4 = _11 != 0;
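For reference, a minimal C sketch of the pattern that earlier commit already handled (illustrative only; the identifiers are not taken from either commit):

#include <stdbool.h>

/* Returns the previous state of bit CNT.  With -O2 on x86 this can be
   emitted as a single "lock bts" plus a flag read instead of a
   compare-and-exchange loop.  */
bool
test_and_set_bit (unsigned int *v, int cnt)
{
  unsigned int mask = 1u << cnt;
  return (__atomic_fetch_or (v, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}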
But it failed to optimize many equivalent, but slightly different cases
(illustrative C sources for two of these follow the list):
1.
_1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
_4 = (_Bool) _1;
2.
_1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
_4 = (_Bool) _1;
3.
_1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
_7 = ~_1;
_5 = (_Bool) _7;
4.
_1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
_7 = ~_1;
_5 = (_Bool) _7;
5.
_1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
_2 = (int) _1;
_7 = ~_2;
_5 = (_Bool) _7;
6.
_1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
_2 = (int) _1;
_7 = ~_2;
_5 = (_Bool) _7;
7.
_1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
_5 = (signed int) _1;
_4 = _5 < 0;
8.
_1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
_5 = (signed int) _1;
_4 = _5 < 0;
9.
_1 = 1 << bit_4(D);
mask_5 = (unsigned int) _1;
_2 = __atomic_fetch_or_4 (v_7(D), mask_5, 0);
_3 = _2 & mask_5;
10.
mask_7 = 1 << bit_6(D);
_1 = ~mask_7;
_2 = (unsigned int) _1;
_3 = __atomic_fetch_and_4 (v_9(D), _2, 0);
_4 = (int) _3;
_5 = _4 & mask_7;
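As a rough illustration (these C sketches are not from the commit; the function names are invented), case 7 can arise from testing the sign bit of the value returned by the builtin, and case 9 from a shift whose result is converted before being used as the mask:

#include <stdbool.h>

/* Roughly case 7: sign-bit test of the previous value; after the
   canonicalization this can become a "lock bts" plus flag read.  */
bool
set_sign_bit (unsigned int *v)
{
  return (int) __atomic_fetch_or (v, 0x80000000u, __ATOMIC_RELAXED) < 0;
}

/* Roughly case 9: 1 << bit is computed as int and converted to
   unsigned int before being passed to the builtin.  */
unsigned int
set_bit (unsigned int *v, int bit)
{
  unsigned int mask = 1 << bit;
  return __atomic_fetch_or (v, mask, __ATOMIC_RELAXED) & mask;
}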
We make
mask_2 = 1 << cnt_1;
_4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
_5 = _4 & mask_2;
and
mask_6 = 1 << bit_5(D);
_1 = ~mask_6;
_2 = __atomic_fetch_and_4 (v_8(D), _1, 0);
_3 = _2 & mask_6;
_4 = _3 != 0;
the canonical forms for this optimization and transform cases 1-8 to the
equivalent canonical form. For cases 9 and 10, we simply remove the cast
before __atomic_fetch_or_4/__atomic_fetch_and_4, yielding
_1 = 1 << bit_4(D);
_2 = __atomic_fetch_or_4 (v_7(D), _1, 0);
_3 = _2 & _1;
and
mask_7 = 1 << bit_6(D);
_1 = ~mask_7;
_3 = __atomic_fetch_and_4 (v_9(D), _1, 0);
_6 = _3 & mask_7;
_5 = (int) _6;
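One C shape that can produce case 10, where only the casts around the builtin need to be dropped (an illustrative sketch, not taken from the commit or its tests):

/* MASK is an int while the atomic object is unsigned int, so ~mask is
   converted to unsigned int for the builtin and the result is converted
   back to int before the AND.  */
int
clear_bit (unsigned int *v, int bit)
{
  int mask = 1 << bit;
  return (int) __atomic_fetch_and (v, ~mask, __ATOMIC_RELAXED) & mask;
}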
2021-11-04 H.J. Lu <hongjiu.lu@intel.com>
Hongtao Liu <hongtao.liu@intel.com>
gcc/
PR middle-end/102566
* match.pd (nop_atomic_bit_test_and_p): New match.
* tree-ssa-ccp.c (convert_atomic_bit_not): New function.
(gimple_nop_atomic_bit_test_and_p): New prototype.
(optimize_atomic_bit_test_and): Transform equivalent, but slightly
different cases to their canonical forms.
gcc/testsuite/
PR middle-end/102566
* g++.target/i386/pr102566-1.C: New test.
* g++.target/i386/pr102566-2.C: Likewise.
* g++.target/i386/pr102566-3.C: Likewise.
* g++.target/i386/pr102566-4.C: Likewise.
* g++.target/i386/pr102566-5a.C: Likewise.
* g++.target/i386/pr102566-5b.C: Likewise.
* g++.target/i386/pr102566-6a.C: Likewise.
* g++.target/i386/pr102566-6b.C: Likewise.
* gcc.target/i386/pr102566-1a.c: Likewise.
* gcc.target/i386/pr102566-1b.c: Likewise.
* gcc.target/i386/pr102566-2.c: Likewise.
* gcc.target/i386/pr102566-3a.c: Likewise.
* gcc.target/i386/pr102566-3b.c: Likewise.
* gcc.target/i386/pr102566-4.c: Likewise.
* gcc.target/i386/pr102566-5.c: Likewise.
* gcc.target/i386/pr102566-6.c: Likewise.
* gcc.target/i386/pr102566-7.c: Likewise.
* gcc.target/i386/pr102566-8a.c: Likewise.
* gcc.target/i386/pr102566-8b.c: Likewise.
* gcc.target/i386/pr102566-9a.c: Likewise.
* gcc.target/i386/pr102566-9b.c: Likewise.
* gcc.target/i386/pr102566-10a.c: Likewise.
* gcc.target/i386/pr102566-10b.c: Likewise.
* gcc.target/i386/pr102566-11.c: Likewise.
* gcc.target/i386/pr102566-12.c: Likewise.
* gcc.target/i386/pr102566-13.c: New test.
* gcc.target/i386/pr102566-14.c: New test.
Diffstat (limited to 'gcc/tree-ssa-ccp.c')
-rw-r--r--   gcc/tree-ssa-ccp.c | 456
1 file changed, 414 insertions, 42 deletions
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 60ae5e6..0f79e9f 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -3243,6 +3243,90 @@ optimize_unreachable (gimple_stmt_iterator i)
   return ret;
 }
 
+/* Convert
+     _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
+     _7 = ~_1;
+     _5 = (_Bool) _7;
+   to
+     _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
+     _8 = _1 & 1;
+     _5 = _8 == 0;
+   and convert
+     _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
+     _7 = ~_1;
+     _4 = (_Bool) _7;
+   to
+     _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
+     _8 = _1 & 1;
+     _4 = (_Bool) _8;
+
+   USE_STMT is the gimplt statement which uses the return value of
+   __atomic_fetch_or_*.  LHS is the return value of __atomic_fetch_or_*.
+   MASK is the mask passed to __atomic_fetch_or_*.
+*/
+
+static gimple *
+convert_atomic_bit_not (enum internal_fn fn, gimple *use_stmt,
+                        tree lhs, tree mask)
+{
+  tree and_mask;
+  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
+    {
+      /* MASK must be ~1.  */
+      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),
+                                           ~HOST_WIDE_INT_1), mask, 0))
+        return nullptr;
+      and_mask = build_int_cst (TREE_TYPE (lhs), 1);
+    }
+  else
+    {
+      /* MASK must be 1.  */
+      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs), 1), mask, 0))
+        return nullptr;
+      and_mask = mask;
+    }
+
+  tree use_lhs = gimple_assign_lhs (use_stmt);
+
+  use_operand_p use_p;
+  gimple *use_not_stmt;
+
+  if (!single_imm_use (use_lhs, &use_p, &use_not_stmt)
+      || !is_gimple_assign (use_not_stmt))
+    return nullptr;
+
+  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_not_stmt)))
+    return nullptr;
+
+  tree use_not_lhs = gimple_assign_lhs (use_not_stmt);
+  if (TREE_CODE (TREE_TYPE (use_not_lhs)) != BOOLEAN_TYPE)
+    return nullptr;
+
+  gimple_stmt_iterator gsi;
+  gsi = gsi_for_stmt (use_stmt);
+  gsi_remove (&gsi, true);
+  tree var = make_ssa_name (TREE_TYPE (lhs));
+  use_stmt = gimple_build_assign (var, BIT_AND_EXPR, lhs, and_mask);
+  gsi = gsi_for_stmt (use_not_stmt);
+  gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);
+  lhs = gimple_assign_lhs (use_not_stmt);
+  gimple *g = gimple_build_assign (lhs, EQ_EXPR, var,
+                                   build_zero_cst (TREE_TYPE (mask)));
+  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+  gsi = gsi_for_stmt (use_not_stmt);
+  gsi_remove (&gsi, true);
+  return use_stmt;
+}
+
+/* match.pd function to match atomic_bit_test_and pattern which
+   has nop_convert:
+     _1 = __atomic_fetch_or_4 (&v, 1, 0);
+     _2 = (int) _1;
+     _5 = _2 & 1;
+ */
+extern bool gimple_nop_atomic_bit_test_and_p (tree, tree *,
+                                              tree (*) (tree));
+
 /* Optimize
      mask_2 = 1 << cnt_1;
      _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
@@ -3269,7 +3353,7 @@ optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
   tree lhs = gimple_call_lhs (call);
   use_operand_p use_p;
   gimple *use_stmt;
-  tree mask, bit;
+  tree mask;
   optab optab;
 
   if (!flag_inline_atomics
@@ -3279,10 +3363,271 @@ optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
       || !single_imm_use (lhs, &use_p, &use_stmt)
       || !is_gimple_assign (use_stmt)
-      || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
      || !gimple_vdef (call))
    return;
 
+  tree bit = nullptr;
+
+  mask = gimple_call_arg (call, 1);
+  tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
+  if (rhs_code != BIT_AND_EXPR)
+    {
+      if (rhs_code != NOP_EXPR && rhs_code != BIT_NOT_EXPR)
+        return;
+
+      tree use_lhs = gimple_assign_lhs (use_stmt);
+      if (TREE_CODE (use_lhs) == SSA_NAME
+          && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
+        return;
+
+      tree use_rhs = gimple_assign_rhs1 (use_stmt);
+      if (lhs != use_rhs)
+        return;
+
+      gimple *g;
+      gimple_stmt_iterator gsi;
+      tree var;
+      int ibit = -1;
+
+      if (rhs_code == BIT_NOT_EXPR)
+        {
+          g = convert_atomic_bit_not (fn, use_stmt, lhs, mask);
+          if (!g)
+            return;
+          use_stmt = g;
+          ibit = 0;
+        }
+      else if (TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE)
+        {
+          tree and_mask;
+          if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
+            {
+              /* MASK must be ~1.  */
+              if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),
+                                                   ~HOST_WIDE_INT_1),
+                                    mask, 0))
+                return;
+
+              /* Convert
+                   _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
+                   _4 = (_Bool) _1;
+                 to
+                   _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
+                   _5 = _1 & 1;
+                   _4 = (_Bool) _5;
+               */
+              and_mask = build_int_cst (TREE_TYPE (lhs), 1);
+            }
+          else
+            {
+              and_mask = build_int_cst (TREE_TYPE (lhs), 1);
+              if (!operand_equal_p (and_mask, mask, 0))
+                return;
+
+              /* Convert
+                   _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
+                   _4 = (_Bool) _1;
+                 to
+                   _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
+                   _5 = _1 & 1;
+                   _4 = (_Bool) _5;
+               */
+            }
+          var = make_ssa_name (TREE_TYPE (use_rhs));
+          replace_uses_by (use_rhs, var);
+          g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,
+                                   and_mask);
+          gsi = gsi_for_stmt (use_stmt);
+          gsi_insert_before (&gsi, g, GSI_NEW_STMT);
+          use_stmt = g;
+          ibit = 0;
+        }
+      else if (TYPE_PRECISION (TREE_TYPE (use_lhs))
+               == TYPE_PRECISION (TREE_TYPE (use_rhs)))
+        {
+          gimple *use_nop_stmt;
+          if (!single_imm_use (use_lhs, &use_p, &use_nop_stmt)
+              || !is_gimple_assign (use_nop_stmt))
+            return;
+          rhs_code = gimple_assign_rhs_code (use_nop_stmt);
+          if (rhs_code != BIT_AND_EXPR)
+            {
+              tree use_nop_lhs = gimple_assign_lhs (use_nop_stmt);
+              if (TREE_CODE (use_nop_lhs) == SSA_NAME
+                  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_nop_lhs))
+                return;
+              if (rhs_code == BIT_NOT_EXPR)
+                {
+                  g = convert_atomic_bit_not (fn, use_nop_stmt, lhs,
+                                              mask);
+                  if (!g)
+                    return;
+                  /* Convert
+                       _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
+                       _2 = (int) _1;
+                       _7 = ~_2;
+                       _5 = (_Bool) _7;
+                     to
+                       _1 = __atomic_fetch_or_4 (ptr_6, ~1, _3);
+                       _8 = _1 & 1;
+                       _5 = _8 == 0;
+                     and convert
+                       _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
+                       _2 = (int) _1;
+                       _7 = ~_2;
+                       _5 = (_Bool) _7;
+                     to
+                       _1 = __atomic_fetch_and_4 (ptr_6, 1, _3);
+                       _8 = _1 & 1;
+                       _5 = _8 == 0;
+                   */
+                  gsi = gsi_for_stmt (use_stmt);
+                  gsi_remove (&gsi, true);
+                  use_stmt = g;
+                  ibit = 0;
+                }
+              else
+                {
+                  if (TREE_CODE (TREE_TYPE (use_nop_lhs)) != BOOLEAN_TYPE)
+                    return;
+                  if (rhs_code != GE_EXPR && rhs_code != LT_EXPR)
+                    return;
+                  tree cmp_rhs1 = gimple_assign_rhs1 (use_nop_stmt);
+                  if (use_lhs != cmp_rhs1)
+                    return;
+                  tree cmp_rhs2 = gimple_assign_rhs2 (use_nop_stmt);
+                  if (!integer_zerop (cmp_rhs2))
+                    return;
+
+                  tree and_mask;
+
+                  unsigned HOST_WIDE_INT bytes
+                    = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (use_rhs)));
+                  ibit = bytes * BITS_PER_UNIT - 1;
+                  unsigned HOST_WIDE_INT highest
+                    = HOST_WIDE_INT_1U << ibit;
+
+                  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
+                    {
+                      /* Get the signed maximum of the USE_RHS type.  */
+                      and_mask = build_int_cst (TREE_TYPE (use_rhs),
+                                                highest - 1);
+                      if (!operand_equal_p (and_mask, mask, 0))
+                        return;
+
+                      /* Convert
+                           _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
+                           _5 = (signed int) _1;
+                           _4 = _5 < 0 or _5 >= 0;
+                         to
+                           _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
+                           _6 = _1 & 0x80000000;
+                           _4 = _6 != 0 or _6 == 0;
+                       */
+                      and_mask = build_int_cst (TREE_TYPE (use_rhs),
+                                                highest);
+                    }
+                  else
+                    {
+                      /* Get the signed minimum of the USE_RHS type.  */
+                      and_mask = build_int_cst (TREE_TYPE (use_rhs),
+                                                highest);
+                      if (!operand_equal_p (and_mask, mask, 0))
+                        return;
+
+                      /* Convert
+                           _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
+                           _5 = (signed int) _1;
+                           _4 = _5 < 0 or _5 >= 0;
+                         to
+                           _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
+                           _6 = _1 & 0x80000000;
+                           _4 = _6 != 0 or _6 == 0;
+                       */
+                    }
+                  var = make_ssa_name (TREE_TYPE (use_rhs));
+                  gsi = gsi_for_stmt (use_stmt);
+                  gsi_remove (&gsi, true);
+                  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,
+                                           and_mask);
+                  gsi = gsi_for_stmt (use_nop_stmt);
+                  gsi_insert_before (&gsi, g, GSI_NEW_STMT);
+                  use_stmt = g;
+                  g = gimple_build_assign (use_nop_lhs,
+                                           (rhs_code == GE_EXPR
+                                            ? EQ_EXPR : NE_EXPR),
+                                           var,
+                                           build_zero_cst (TREE_TYPE (use_rhs)));
+                  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+                  gsi = gsi_for_stmt (use_nop_stmt);
+                  gsi_remove (&gsi, true);
+                }
+            }
+          else
+            {
+              tree and_expr = gimple_assign_lhs (use_nop_stmt);
+              tree match_op[3];
+              gimple *g;
+              if (!gimple_nop_atomic_bit_test_and_p (and_expr,
+                                                     &match_op[0], NULL)
+                  || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (match_op[2])
+                  || !single_imm_use (match_op[2], &use_p, &g)
+                  || !is_gimple_assign (g))
+                return;
+              mask = match_op[1];
+              if (TREE_CODE (mask) == INTEGER_CST)
+                {
+                  ibit = tree_log2 (mask);
+                  gcc_assert (ibit >= 0);
+                }
+              else
+                {
+                  g = SSA_NAME_DEF_STMT (mask);
+                  gcc_assert (is_gimple_assign (g));
+                  bit = gimple_assign_rhs2 (g);
+                }
+              /* Convert
+                   _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
+                   _2 = (int) _1;
+                   _5 = _2 & mask;
+                 to
+                   _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
+                   _6 = _1 & mask;
+                   _5 = (int) _6;
+                 and convert
+                   _1 = ~mask_7;
+                   _2 = (unsigned int) _1;
+                   _3 = __atomic_fetch_and_4 (ptr_6, _2, 0);
+                   _4 = (int) _3;
+                   _5 = _4 & mask_7;
+                 to
+                   _1 = __atomic_fetch_and_* (ptr_6, ~mask_7, _3);
+                   _12 = _3 & mask_7;
+                   _5 = (int) _12;
+               */
+              replace_uses_by (use_lhs, lhs);
+              tree use_nop_lhs = gimple_assign_lhs (use_nop_stmt);
+              var = make_ssa_name (TREE_TYPE (use_nop_lhs));
+              gimple_assign_set_lhs (use_nop_stmt, var);
+              gsi = gsi_for_stmt (use_stmt);
+              gsi_remove (&gsi, true);
+              release_defs (use_stmt);
+              gsi_remove (gsip, true);
+              g = gimple_build_assign (use_nop_lhs, NOP_EXPR, var);
+              gsi = gsi_for_stmt (use_nop_stmt);
+              gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+              use_stmt = use_nop_stmt;
+            }
+        }
+
+      if (!bit)
+        {
+          if (ibit < 0)
+            gcc_unreachable ();
+          bit = build_int_cst (TREE_TYPE (lhs), ibit);
+        }
+    }
+
   switch (fn)
     {
     case IFN_ATOMIC_BIT_TEST_AND_SET:
@@ -3301,51 +3646,76 @@ optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
   if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))
       == CODE_FOR_nothing)
     return;
 
-  mask = gimple_call_arg (call, 1);
   tree use_lhs = gimple_assign_lhs (use_stmt);
   if (!use_lhs)
     return;
 
-  if (TREE_CODE (mask) == INTEGER_CST)
-    {
-      if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
-        mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
-      mask = fold_convert (TREE_TYPE (lhs), mask);
-      int ibit = tree_log2 (mask);
-      if (ibit < 0)
-        return;
-      bit = build_int_cst (TREE_TYPE (lhs), ibit);
-    }
-  else if (TREE_CODE (mask) == SSA_NAME)
+  if (!bit)
     {
-      gimple *g = SSA_NAME_DEF_STMT (mask);
-      if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
+      if (TREE_CODE (mask) == INTEGER_CST)
        {
-          if (!is_gimple_assign (g)
-              || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
+          if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
+            mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
+          mask = fold_convert (TREE_TYPE (lhs), mask);
+          int ibit = tree_log2 (mask);
+          if (ibit < 0)
            return;
-          mask = gimple_assign_rhs1 (g);
-          if (TREE_CODE (mask) != SSA_NAME)
+          bit = build_int_cst (TREE_TYPE (lhs), ibit);
+        }
+      else if (TREE_CODE (mask) == SSA_NAME)
+        {
+          gimple *g = SSA_NAME_DEF_STMT (mask);
+          if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
+            {
+              if (!is_gimple_assign (g)
+                  || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
+                return;
+              mask = gimple_assign_rhs1 (g);
+              if (TREE_CODE (mask) != SSA_NAME)
+                return;
+              g = SSA_NAME_DEF_STMT (mask);
+            }
+          if (!is_gimple_assign (g))
            return;
-          g = SSA_NAME_DEF_STMT (mask);
+          rhs_code = gimple_assign_rhs_code (g);
+          if (rhs_code != LSHIFT_EXPR)
+            {
+              if (rhs_code != NOP_EXPR)
+                return;
+
+              /* Handle
+                   _1 = 1 << bit_4(D);
+                   mask_5 = (unsigned int) _1;
+                   _2 = __atomic_fetch_or_4 (v_7(D), mask_5, 0);
+                   _3 = _2 & mask_5;
+               */
+              tree nop_lhs = gimple_assign_lhs (g);
+              tree nop_rhs = gimple_assign_rhs1 (g);
+              if (TYPE_PRECISION (TREE_TYPE (nop_lhs))
+                  != TYPE_PRECISION (TREE_TYPE (nop_rhs)))
+                return;
+              g = SSA_NAME_DEF_STMT (nop_rhs);
+              if (!is_gimple_assign (g)
+                  || gimple_assign_rhs_code (g) != LSHIFT_EXPR)
+                return;
+            }
+          if (!integer_onep (gimple_assign_rhs1 (g)))
+            return;
+          bit = gimple_assign_rhs2 (g);
        }
-      if (!is_gimple_assign (g)
-          || gimple_assign_rhs_code (g) != LSHIFT_EXPR
-          || !integer_onep (gimple_assign_rhs1 (g)))
+      else
        return;
-      bit = gimple_assign_rhs2 (g);
-    }
-  else
-    return;
 
-  if (gimple_assign_rhs1 (use_stmt) == lhs)
-    {
-      if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
+      if (gimple_assign_rhs1 (use_stmt) == lhs)
+        {
+          if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
+            return;
+        }
+      else if (gimple_assign_rhs2 (use_stmt) != lhs
+               || !operand_equal_p (gimple_assign_rhs1 (use_stmt),
+                                    mask, 0))
        return;
     }
-  else if (gimple_assign_rhs2 (use_stmt) != lhs
-           || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
-    return;
 
   bool use_bool = true;
   bool has_debug_uses = false;
@@ -3434,18 +3804,20 @@ optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
         of the specified bit after the atomic operation (makes only sense
         for xor, otherwise the bit content is compile time known), we need
         to invert the bit.  */
-      g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
-                               BIT_XOR_EXPR, new_lhs,
-                               use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
-                                        : mask);
-      new_lhs = gimple_assign_lhs (g);
+      tree mask_convert = mask;
+      gimple_seq stmts = NULL;
+      if (!use_bool)
+        mask_convert = gimple_convert (&stmts, TREE_TYPE (lhs), mask);
+      new_lhs = gimple_build (&stmts, BIT_XOR_EXPR, TREE_TYPE (lhs), new_lhs,
+                              use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
+                                       : mask_convert);
       if (throws)
        {
-          gsi_insert_on_edge_immediate (e, g);
-          gsi = gsi_for_stmt (g);
+          gsi_insert_seq_on_edge_immediate (e, stmts);
+          gsi = gsi_for_stmt (gimple_seq_last (stmts));
        }
       else
-        gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+        gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
     }
   if (use_bool && has_debug_uses)
     {
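The new i386 tests exercise these source shapes end to end; the property they check is that a single locked bit-test instruction is used and no compare-and-exchange loop is emitted. A sketch in the same style (not a verbatim copy of any pr102566-*.c file; the scan patterns are only indicative):

/* { dg-do compile } */
/* { dg-options "-O2" } */

#include <stdbool.h>

bool
clear_bit_0 (unsigned int *v)
{
  /* Old state of bit 0, cleared atomically.  */
  return __atomic_fetch_and (v, ~1u, __ATOMIC_RELAXED) & 1;
}

/* { dg-final { scan-assembler "lock" } } */
/* { dg-final { scan-assembler-not "cmpxchg" } } */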