-rw-r--r--  gcc/Makefile.in            |    1
-rw-r--r--  gcc/gimple-lower-bitint.cc | 6074
-rw-r--r--  gcc/gimple-lower-bitint.h  |   31
-rw-r--r--  gcc/passes.def             |    3
-rw-r--r--  gcc/tree-pass.h            |    3
-rw-r--r--  gcc/tree-ssa-coalesce.cc   |  148
-rw-r--r--  gcc/tree-ssa-live.cc       |    8
-rw-r--r--  gcc/tree-ssa-live.h        |    8
8 files changed, 6270 insertions(+), 6 deletions(-)
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 5930b52..6d608db 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1457,6 +1457,7 @@ OBJS = \
gimple-loop-jam.o \
gimple-loop-versioning.o \
gimple-low.o \
+ gimple-lower-bitint.o \
gimple-predicate-analysis.o \
gimple-pretty-print.o \
gimple-range.o \
diff --git a/gcc/gimple-lower-bitint.cc b/gcc/gimple-lower-bitint.cc
new file mode 100644
index 0000000..3906c00
--- /dev/null
+++ b/gcc/gimple-lower-bitint.cc
@@ -0,0 +1,6074 @@
+/* Lower _BitInt(N) operations to scalar operations.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ Contributed by Jakub Jelinek <jakub@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "tree.h"
+#include "gimple.h"
+#include "cfghooks.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "fold-const.h"
+#include "gimplify.h"
+#include "gimple-iterator.h"
+#include "tree-cfg.h"
+#include "tree-dfa.h"
+#include "cfgloop.h"
+#include "cfganal.h"
+#include "target.h"
+#include "tree-ssa-live.h"
+#include "tree-ssa-coalesce.h"
+#include "domwalk.h"
+#include "memmodel.h"
+#include "optabs.h"
+#include "varasm.h"
+#include "gimple-range.h"
+#include "value-range.h"
+#include "langhooks.h"
+#include "gimplify-me.h"
+#include "diagnostic-core.h"
+#include "tree-eh.h"
+#include "tree-pretty-print.h"
+#include "alloc-pool.h"
+#include "tree-into-ssa.h"
+#include "tree-cfgcleanup.h"
+#include "tree-switch-conversion.h"
+#include "ubsan.h"
+#include "gimple-lower-bitint.h"
+
+/* Split BITINT_TYPE precisions into 4 categories. Small _BitInt, where
+ the target hook says it is a single limb; middle _BitInt, which per ABI
+ is not, but there is some INTEGER_TYPE in which arithmetic can be
+ performed (operations on such _BitInt are lowered to casts to that
+ arithmetic type and back; e.g. on x86_64 the limb is DImode, but the
+ target supports TImode, so _BitInt(65) to _BitInt(128) are middle
+ ones); large _BitInt, which should be handled by straight line code; and
+ finally huge _BitInt, which should be handled by loops over the limbs. */
+
+enum bitint_prec_kind {
+ bitint_prec_small,
+ bitint_prec_middle,
+ bitint_prec_large,
+ bitint_prec_huge
+};
+
+/* Caches to speed up bitint_precision_kind. */
+
+static int small_max_prec, mid_min_prec, large_min_prec, huge_min_prec;
+static int limb_prec;
+
+/* Categorize _BitInt(PREC) as small, middle, large or huge. */
+
+static bitint_prec_kind
+bitint_precision_kind (int prec)
+{
+ if (prec <= small_max_prec)
+ return bitint_prec_small;
+ if (huge_min_prec && prec >= huge_min_prec)
+ return bitint_prec_huge;
+ if (large_min_prec && prec >= large_min_prec)
+ return bitint_prec_large;
+ if (mid_min_prec && prec >= mid_min_prec)
+ return bitint_prec_middle;
+
+ struct bitint_info info;
+ /* Don't call the target hook inside gcc_assert; the call must not
+ depend on assert checking being enabled. */
+ bool ok = targetm.c.bitint_type_info (prec, &info);
+ gcc_assert (ok);
+ scalar_int_mode limb_mode = as_a <scalar_int_mode> (info.limb_mode);
+ if (prec <= GET_MODE_PRECISION (limb_mode))
+ {
+ small_max_prec = prec;
+ return bitint_prec_small;
+ }
+ scalar_int_mode arith_mode = (targetm.scalar_mode_supported_p (TImode)
+ ? TImode : DImode);
+ if (!large_min_prec
+ && GET_MODE_PRECISION (arith_mode) > GET_MODE_PRECISION (limb_mode))
+ large_min_prec = GET_MODE_PRECISION (arith_mode) + 1;
+ if (!limb_prec)
+ limb_prec = GET_MODE_PRECISION (limb_mode);
+ if (!huge_min_prec)
+ {
+ if (4 * limb_prec >= GET_MODE_PRECISION (arith_mode))
+ huge_min_prec = 4 * limb_prec;
+ else
+ huge_min_prec = GET_MODE_PRECISION (arith_mode) + 1;
+ }
+ if (prec <= GET_MODE_PRECISION (arith_mode))
+ {
+ if (!mid_min_prec || prec < mid_min_prec)
+ mid_min_prec = prec;
+ return bitint_prec_middle;
+ }
+ if (large_min_prec && prec <= large_min_prec)
+ return bitint_prec_large;
+ return bitint_prec_huge;
+}
+
+/* Same for a TYPE. */
+
+static bitint_prec_kind
+bitint_precision_kind (tree type)
+{
+ return bitint_precision_kind (TYPE_PRECISION (type));
+}
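
To make the categorization above concrete, assuming an x86_64-like target
(64-bit DImode limb, TImode supported for arithmetic), the categories work
out roughly as in this illustrative snippet (not part of the patch):

  _BitInt(63)  a;  /* small  - fits in a single limb */
  _BitInt(100) b;  /* middle - lowered through the 128-bit arithmetic type */
  _BitInt(200) c;  /* large  - straight line code over its 4 limbs */
  _BitInt(512) d;  /* huge   - handled by a loop over its 8 limbs */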
+
+/* Return minimum precision needed to describe INTEGER_CST
+ CST. All bits above that precision up to precision of
+ TREE_TYPE (CST) are cleared if EXT is set to 0, or set
+ if EXT is set to -1. */
+
+static unsigned
+bitint_min_cst_precision (tree cst, int &ext)
+{
+ ext = tree_int_cst_sgn (cst) < 0 ? -1 : 0;
+ wide_int w = wi::to_wide (cst);
+ unsigned min_prec = wi::min_precision (w, TYPE_SIGN (TREE_TYPE (cst)));
+ /* For signed values, we don't need to count the sign bit,
+ we'll use constant 0 or -1 for the upper bits. */
+ if (!TYPE_UNSIGNED (TREE_TYPE (cst)))
+ --min_prec;
+ else
+ {
+ /* For unsigned values, also try signed min_precision
+ in case the constant has lots of most significant bits set. */
+ unsigned min_prec2 = wi::min_precision (w, SIGNED) - 1;
+ if (min_prec2 < min_prec)
+ {
+ ext = -1;
+ return min_prec2;
+ }
+ }
+ return min_prec;
+}
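
As a worked example of the above: for a signed _BitInt(256) constant -5,
wi::min_precision returns 4 and the sign bit is then dropped, so the
function returns 3 with EXT set to -1, i.e. bits 3 through 255 are all
copies of the sign bit; for an unsigned _BitInt(256) constant 5 it returns
3 with EXT set to 0, i.e. those upper bits are all clear.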
+
+namespace {
+
+/* If OP is middle _BitInt, cast it to corresponding INTEGER_TYPE
+ cached in TYPE and return it. */
+
+tree
+maybe_cast_middle_bitint (gimple_stmt_iterator *gsi, tree op, tree &type)
+{
+ if (op == NULL_TREE
+ || TREE_CODE (TREE_TYPE (op)) != BITINT_TYPE
+ || bitint_precision_kind (TREE_TYPE (op)) != bitint_prec_middle)
+ return op;
+
+ int prec = TYPE_PRECISION (TREE_TYPE (op));
+ int uns = TYPE_UNSIGNED (TREE_TYPE (op));
+ if (type == NULL_TREE
+ || TYPE_PRECISION (type) != prec
+ || TYPE_UNSIGNED (type) != uns)
+ type = build_nonstandard_integer_type (prec, uns);
+
+ if (TREE_CODE (op) != SSA_NAME)
+ {
+ tree nop = fold_convert (type, op);
+ if (is_gimple_val (nop))
+ return nop;
+ }
+
+ tree nop = make_ssa_name (type);
+ gimple *g = gimple_build_assign (nop, NOP_EXPR, op);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ return nop;
+}
+
+/* Return true if STMT can be handled in a loop from least to most
+ significant limb together with its dependencies. */
+
+bool
+mergeable_op (gimple *stmt)
+{
+ if (!is_gimple_assign (stmt))
+ return false;
+ switch (gimple_assign_rhs_code (stmt))
+ {
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case NEGATE_EXPR:
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_NOT_EXPR:
+ case SSA_NAME:
+ case INTEGER_CST:
+ return true;
+ case LSHIFT_EXPR:
+ {
+ tree cnt = gimple_assign_rhs2 (stmt);
+ if (tree_fits_uhwi_p (cnt)
+ && tree_to_uhwi (cnt) < (unsigned HOST_WIDE_INT) limb_prec)
+ return true;
+ }
+ break;
+ CASE_CONVERT:
+ case VIEW_CONVERT_EXPR:
+ {
+ tree lhs_type = TREE_TYPE (gimple_assign_lhs (stmt));
+ tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
+ if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
+ && TREE_CODE (lhs_type) == BITINT_TYPE
+ && TREE_CODE (rhs_type) == BITINT_TYPE
+ && bitint_precision_kind (lhs_type) >= bitint_prec_large
+ && bitint_precision_kind (rhs_type) >= bitint_prec_large
+ && tree_int_cst_equal (TYPE_SIZE (lhs_type), TYPE_SIZE (rhs_type)))
+ {
+ if (TYPE_PRECISION (rhs_type) >= TYPE_PRECISION (lhs_type))
+ return true;
+ if ((unsigned) TYPE_PRECISION (lhs_type) % (2 * limb_prec) != 0)
+ return true;
+ if (bitint_precision_kind (lhs_type) == bitint_prec_large)
+ return true;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return false;
+}
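
For example (illustrative, not from the patch), with _BitInt(512) operands
a, b and c, every statement produced for

  d = (a + b) & ~c;

is mergeable, so the whole expression can be lowered into a single loop
from least to most significant limb, whereas a multiplication or a shift
by 70 (at least the limb precision) is not mergeable and is lowered on its
own.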
+
+/* Return non-zero if STMT is a .{ADD,SUB,MUL}_OVERFLOW call with
+ _Complex large/huge _BitInt lhs which has at most two immediate uses,
+ at most one use in REALPART_EXPR stmt in the same bb and exactly one
+ IMAGPART_EXPR use in the same bb with a single use which casts it to
+ non-BITINT_TYPE integral type. If there is a REALPART_EXPR use,
+ return 2. Such cases (most common uses of those builtins) can be
+ optimized by marking their lhs and lhs of IMAGPART_EXPR and maybe lhs
+ of REALPART_EXPR as not needed to be backed up by a stack variable.
+ For .UBSAN_CHECK_{ADD,SUB,MUL} return 3. */
+
+int
+optimizable_arith_overflow (gimple *stmt)
+{
+ bool is_ubsan = false;
+ if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
+ return false;
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_ADD_OVERFLOW:
+ case IFN_SUB_OVERFLOW:
+ case IFN_MUL_OVERFLOW:
+ break;
+ case IFN_UBSAN_CHECK_ADD:
+ case IFN_UBSAN_CHECK_SUB:
+ case IFN_UBSAN_CHECK_MUL:
+ is_ubsan = true;
+ break;
+ default:
+ return 0;
+ }
+ tree lhs = gimple_call_lhs (stmt);
+ if (!lhs)
+ return 0;
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
+ return 0;
+ tree type = is_ubsan ? TREE_TYPE (lhs) : TREE_TYPE (TREE_TYPE (lhs));
+ if (TREE_CODE (type) != BITINT_TYPE
+ || bitint_precision_kind (type) < bitint_prec_large)
+ return 0;
+
+ if (is_ubsan)
+ {
+ use_operand_p use_p;
+ gimple *use_stmt;
+ if (!single_imm_use (lhs, &use_p, &use_stmt)
+ || gimple_bb (use_stmt) != gimple_bb (stmt)
+ || !gimple_store_p (use_stmt)
+ || !is_gimple_assign (use_stmt)
+ || gimple_has_volatile_ops (use_stmt)
+ || stmt_ends_bb_p (use_stmt))
+ return 0;
+ return 3;
+ }
+
+ imm_use_iterator ui;
+ use_operand_p use_p;
+ int seen = 0;
+ FOR_EACH_IMM_USE_FAST (use_p, ui, lhs)
+ {
+ gimple *g = USE_STMT (use_p);
+ if (is_gimple_debug (g))
+ continue;
+ if (!is_gimple_assign (g) || gimple_bb (g) != gimple_bb (stmt))
+ return 0;
+ if (gimple_assign_rhs_code (g) == REALPART_EXPR)
+ {
+ if ((seen & 1) != 0)
+ return 0;
+ seen |= 1;
+ }
+ else if (gimple_assign_rhs_code (g) == IMAGPART_EXPR)
+ {
+ if ((seen & 2) != 0)
+ return 0;
+ seen |= 2;
+
+ use_operand_p use2_p;
+ gimple *use_stmt;
+ tree lhs2 = gimple_assign_lhs (g);
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs2))
+ return 0;
+ if (!single_imm_use (lhs2, &use2_p, &use_stmt)
+ || gimple_bb (use_stmt) != gimple_bb (stmt)
+ || !gimple_assign_cast_p (use_stmt))
+ return 0;
+
+ lhs2 = gimple_assign_lhs (use_stmt);
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs2))
+ || TREE_CODE (TREE_TYPE (lhs2)) == BITINT_TYPE)
+ return 0;
+ }
+ else
+ return 0;
+ }
+ if ((seen & 2) == 0)
+ return 0;
+ return seen == 3 ? 2 : 1;
+}
+
+/* If STMT is some kind of comparison (GIMPLE_COND, comparison assignment)
+ comparing large/huge _BitInt types, return the comparison code and if
+ non-NULL fill in the comparison operands to *POP1 and *POP2. */
+
+tree_code
+comparison_op (gimple *stmt, tree *pop1, tree *pop2)
+{
+ tree op1 = NULL_TREE, op2 = NULL_TREE;
+ tree_code code = ERROR_MARK;
+ if (gimple_code (stmt) == GIMPLE_COND)
+ {
+ code = gimple_cond_code (stmt);
+ op1 = gimple_cond_lhs (stmt);
+ op2 = gimple_cond_rhs (stmt);
+ }
+ else if (is_gimple_assign (stmt))
+ {
+ code = gimple_assign_rhs_code (stmt);
+ op1 = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE_CLASS (code) == tcc_comparison
+ || TREE_CODE_CLASS (code) == tcc_binary)
+ op2 = gimple_assign_rhs2 (stmt);
+ }
+ if (TREE_CODE_CLASS (code) != tcc_comparison)
+ return ERROR_MARK;
+ tree type = TREE_TYPE (op1);
+ if (TREE_CODE (type) != BITINT_TYPE
+ || bitint_precision_kind (type) < bitint_prec_large)
+ return ERROR_MARK;
+ if (pop1)
+ {
+ *pop1 = op1;
+ *pop2 = op2;
+ }
+ return code;
+}
+
+/* Class used during large/huge _BitInt lowering containing all the
+ state for the methods. */
+
+struct bitint_large_huge
+{
+ bitint_large_huge ()
+ : m_names (NULL), m_loads (NULL), m_preserved (NULL),
+ m_single_use_names (NULL), m_map (NULL), m_vars (NULL),
+ m_limb_type (NULL_TREE), m_data (vNULL) {}
+
+ ~bitint_large_huge ();
+
+ void insert_before (gimple *);
+ tree limb_access_type (tree, tree);
+ tree limb_access (tree, tree, tree, bool);
+ void if_then (gimple *, profile_probability, edge &, edge &);
+ void if_then_else (gimple *, profile_probability, edge &, edge &);
+ void if_then_if_then_else (gimple *g, gimple *,
+ profile_probability, profile_probability,
+ edge &, edge &, edge &);
+ tree handle_operand (tree, tree);
+ tree prepare_data_in_out (tree, tree, tree *);
+ tree add_cast (tree, tree);
+ tree handle_plus_minus (tree_code, tree, tree, tree);
+ tree handle_lshift (tree, tree, tree);
+ tree handle_cast (tree, tree, tree);
+ tree handle_load (gimple *, tree);
+ tree handle_stmt (gimple *, tree);
+ tree handle_operand_addr (tree, gimple *, int *, int *);
+ tree create_loop (tree, tree *);
+ tree lower_mergeable_stmt (gimple *, tree_code &, tree, tree);
+ tree lower_comparison_stmt (gimple *, tree_code &, tree, tree);
+ void lower_shift_stmt (tree, gimple *);
+ void lower_muldiv_stmt (tree, gimple *);
+ void lower_float_conv_stmt (tree, gimple *);
+ tree arith_overflow_extract_bits (unsigned int, unsigned int, tree,
+ unsigned int, bool);
+ void finish_arith_overflow (tree, tree, tree, tree, tree, tree, gimple *,
+ tree_code);
+ void lower_addsub_overflow (tree, gimple *);
+ void lower_mul_overflow (tree, gimple *);
+ void lower_cplxpart_stmt (tree, gimple *);
+ void lower_complexexpr_stmt (gimple *);
+ void lower_call (tree, gimple *);
+ void lower_asm (gimple *);
+ void lower_stmt (gimple *);
+
+ /* Bitmap of large/huge _BitInt SSA_NAMEs except those that can be
+ merged with their uses. */
+ bitmap m_names;
+ /* Subset of those for lhs of load statements. These will be
+ cleared in m_names if the loads will be mergeable with all
+ their uses. */
+ bitmap m_loads;
+ /* Bitmap of large/huge _BitInt SSA_NAMEs that should survive
+ to later passes (arguments or return values of calls). */
+ bitmap m_preserved;
+ /* Subset of m_names which have a single use. As the lowering
+ can replace various original statements with their lowered
+ form even before it is done iterating over all basic blocks,
+ testing has_single_use for the purpose of emitting clobbers
+ doesn't work properly. */
+ bitmap m_single_use_names;
+ /* Used for coalescing/partitioning of large/huge _BitInt SSA_NAMEs
+ set in m_names. */
+ var_map m_map;
+ /* Mapping of the partitions to corresponding decls. */
+ tree *m_vars;
+ /* Unsigned integer type with limb precision. */
+ tree m_limb_type;
+ /* Its TYPE_SIZE_UNIT. */
+ unsigned HOST_WIDE_INT m_limb_size;
+ /* Location of a gimple stmt which is being currently lowered. */
+ location_t m_loc;
+ /* Current stmt iterator where code is being lowered currently. */
+ gimple_stmt_iterator m_gsi;
+ /* Statement after which any clobbers should be added if non-NULL. */
+ gimple *m_after_stmt;
+ /* Set when creating loops to the loop header bb and its preheader. */
+ basic_block m_bb, m_preheader_bb;
+ /* Stmt iterator after which initialization statements should be emitted. */
+ gimple_stmt_iterator m_init_gsi;
+ /* Decl into which a mergeable statement stores result. */
+ tree m_lhs;
+ /* handle_operand/handle_stmt can be invoked in various ways.
+
+ lower_mergeable_stmt for large _BitInt calls those with constant
+ idx only, expanding to straight line code, for huge _BitInt
+ emits a loop from least significant limb upwards, where each loop
+ iteration handles 2 limbs, plus there can be up to one full limb
+ and one partial limb processed after the loop, where handle_operand
+ and/or handle_stmt are called with constant idx. m_upwards_2limb
+ is set for this case, false otherwise. m_upwards is true if it
+ is either large or huge _BitInt handled by lower_mergeable_stmt,
+ i.e. indexes always increase.
+
+ Another way is used by lower_comparison_stmt, which walks limbs
+ from most significant to least significant, partial limb if any
+ processed first with constant idx and then loop processing a single
+ limb per iteration with non-constant idx.
+
+ Another way is used in lower_shift_stmt, where for LSHIFT_EXPR
+ destination limbs are processed from most significant to least
+ significant or for RSHIFT_EXPR the other way around, in loops or
+ straight line code, but idx usually is non-constant (so from
+ handle_operand/handle_stmt POV random access). The LSHIFT_EXPR
+ handling there can access even partial limbs using non-constant
+ idx (then m_var_msb should be true); for all the other cases,
+ including lower_mergeable_stmt/lower_comparison_stmt, that is
+ not the case and so m_var_msb should be false.
+
+ m_first should be set the first time handle_operand/handle_stmt
+ is called and clear when it is called for some other limb with
+ the same argument. If the lowering of an operand (e.g. INTEGER_CST)
+ or statement (e.g. +/-/<< with < limb_prec constant) needs some
+ state between the different calls, when m_first is true it should
+ push some trees to m_data vector and also make sure m_data_cnt is
+ incremented by how many trees were pushed, and when m_first is
+ false, it can use the m_data[m_data_cnt] etc. data or update them,
+ just needs to bump m_data_cnt by the same amount as when it was
+ called with m_first set. The toplevel calls to
+ handle_operand/handle_stmt should set m_data_cnt to 0 and truncate
+ m_data vector when setting m_first to true.
+
+ m_cast_conditional and m_bitfld_load are used when handling a
+ bit-field load inside of a widening cast. handle_cast sometimes
+ needs to do runtime comparisons and handle_operand only conditionally
+ or even in two separate conditional blocks for one idx (once with
+ constant index after comparing the runtime one for equality with the
+ constant). In these cases, m_cast_conditional is set to true and
+ the bit-field load then communicates its m_data_cnt to handle_cast
+ using m_bitfld_load. */
+ bool m_first;
+ bool m_var_msb;
+ unsigned m_upwards_2limb;
+ bool m_upwards;
+ bool m_cast_conditional;
+ unsigned m_bitfld_load;
+ vec<tree> m_data;
+ unsigned int m_data_cnt;
+};
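
A minimal sketch of the m_first/m_data protocol described in the comment
above; handler, compute_state and emit_limb are hypothetical names used
only for illustration, not part of the patch:

  tree
  handler (tree op, tree idx)
  {
    if (m_first)
      /* First call for OP: create and push the per-operand state. */
      m_data.safe_push (compute_state (op));  /* compute_state: placeholder */
    tree state = m_data[m_data_cnt];  /* Calls for later limbs reuse it. */
    m_data_cnt += 1;                  /* Bump by the number of pushed trees. */
    return emit_limb (state, idx);    /* emit_limb: placeholder */
  }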
+
+bitint_large_huge::~bitint_large_huge ()
+{
+ BITMAP_FREE (m_names);
+ BITMAP_FREE (m_loads);
+ BITMAP_FREE (m_preserved);
+ BITMAP_FREE (m_single_use_names);
+ if (m_map)
+ delete_var_map (m_map);
+ XDELETEVEC (m_vars);
+ m_data.release ();
+}
+
+/* Insert gimple statement G before current location
+ and set its gimple_location. */
+
+void
+bitint_large_huge::insert_before (gimple *g)
+{
+ gimple_set_location (g, m_loc);
+ gsi_insert_before (&m_gsi, g, GSI_SAME_STMT);
+}
+
+/* Return type for accessing limb IDX of BITINT_TYPE TYPE.
+ This is normally m_limb_type, except for a partial most
+ significant limb if any. */
+
+tree
+bitint_large_huge::limb_access_type (tree type, tree idx)
+{
+ if (type == NULL_TREE)
+ return m_limb_type;
+ unsigned HOST_WIDE_INT i = tree_to_uhwi (idx);
+ unsigned int prec = TYPE_PRECISION (type);
+ gcc_assert (i * limb_prec < prec);
+ if ((i + 1) * limb_prec <= prec)
+ return m_limb_type;
+ else
+ return build_nonstandard_integer_type (prec % limb_prec,
+ TYPE_UNSIGNED (type));
+}
+
+/* Return a tree describing how to access limb IDX of VAR corresponding to
+ BITINT_TYPE TYPE. If WRITE_P is true, it will be a store, otherwise a read. */
+
+tree
+bitint_large_huge::limb_access (tree type, tree var, tree idx, bool write_p)
+{
+ tree atype = (tree_fits_uhwi_p (idx)
+ ? limb_access_type (type, idx) : m_limb_type);
+ tree ret;
+ if (DECL_P (var) && tree_fits_uhwi_p (idx))
+ {
+ tree ptype = build_pointer_type (strip_array_types (TREE_TYPE (var)));
+ unsigned HOST_WIDE_INT off = tree_to_uhwi (idx) * m_limb_size;
+ ret = build2 (MEM_REF, m_limb_type,
+ build_fold_addr_expr (var),
+ build_int_cst (ptype, off));
+ TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (var);
+ TREE_SIDE_EFFECTS (ret) = TREE_SIDE_EFFECTS (var);
+ }
+ else if (TREE_CODE (var) == MEM_REF && tree_fits_uhwi_p (idx))
+ {
+ ret
+ = build2 (MEM_REF, m_limb_type, TREE_OPERAND (var, 0),
+ size_binop (PLUS_EXPR, TREE_OPERAND (var, 1),
+ build_int_cst (TREE_TYPE (TREE_OPERAND (var, 1)),
+ tree_to_uhwi (idx)
+ * m_limb_size)));
+ TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (var);
+ TREE_SIDE_EFFECTS (ret) = TREE_SIDE_EFFECTS (var);
+ TREE_THIS_NOTRAP (ret) = TREE_THIS_NOTRAP (var);
+ }
+ else
+ {
+ var = unshare_expr (var);
+ if (TREE_CODE (TREE_TYPE (var)) != ARRAY_TYPE
+ || !useless_type_conversion_p (m_limb_type,
+ TREE_TYPE (TREE_TYPE (var))))
+ {
+ unsigned HOST_WIDE_INT nelts
+ = CEIL (tree_to_uhwi (TYPE_SIZE (type)), limb_prec);
+ tree atype = build_array_type_nelts (m_limb_type, nelts);
+ var = build1 (VIEW_CONVERT_EXPR, atype, var);
+ }
+ ret = build4 (ARRAY_REF, m_limb_type, var, idx, NULL_TREE, NULL_TREE);
+ }
+ if (!write_p && !useless_type_conversion_p (atype, m_limb_type))
+ {
+ gimple *g = gimple_build_assign (make_ssa_name (m_limb_type), ret);
+ insert_before (g);
+ ret = gimple_assign_lhs (g);
+ ret = build1 (NOP_EXPR, atype, ret);
+ }
+ return ret;
+}
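
For instance (assuming a 64-bit limb), limb 1 of a _BitInt(192) VAR_DECL
var becomes a MEM_REF at byte offset 8, roughly ((unsigned long *)&var)[1];
a read of the partial most significant limb of a _BitInt(150) is done in
the full limb type and then narrowed by a cast to a 22-bit type.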
+
+/* Emit a half diamond,
+ if (COND)
+ |\
+ | \
+ | \
+ | new_bb1
+ | /
+ | /
+ |/
+ or if (COND) new_bb1;
+ PROB is the probability that the condition is true.
+ Updates m_gsi to start of new_bb1.
+ Sets EDGE_TRUE to edge from new_bb1 to successor and
+ EDGE_FALSE to the EDGE_FALSE_VALUE edge from if (COND) bb. */
+
+void
+bitint_large_huge::if_then (gimple *cond, profile_probability prob,
+ edge &edge_true, edge &edge_false)
+{
+ insert_before (cond);
+ edge e1 = split_block (gsi_bb (m_gsi), cond);
+ edge e2 = split_block (e1->dest, (gimple *) NULL);
+ edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
+ e1->flags = EDGE_TRUE_VALUE;
+ e1->probability = prob;
+ e3->probability = prob.invert ();
+ set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
+ edge_true = e2;
+ edge_false = e3;
+ m_gsi = gsi_after_labels (e1->dest);
+}
+
+/* Emit a full diamond,
+ if (COND)
+ /\
+ / \
+ / \
+ new_bb1 new_bb2
+ \ /
+ \ /
+ \/
+ or if (COND) new_bb2; else new_bb1;
+ PROB is the probability that the condition is true.
+ Updates m_gsi to start of new_bb2.
+ Sets EDGE_TRUE to edge from new_bb1 to successor and
+ EDGE_FALSE to the EDGE_FALSE_VALUE edge from if (COND) bb. */
+
+void
+bitint_large_huge::if_then_else (gimple *cond, profile_probability prob,
+ edge &edge_true, edge &edge_false)
+{
+ insert_before (cond);
+ edge e1 = split_block (gsi_bb (m_gsi), cond);
+ edge e2 = split_block (e1->dest, (gimple *) NULL);
+ basic_block bb = create_empty_bb (e1->dest);
+ add_bb_to_loop (bb, e1->dest->loop_father);
+ edge e3 = make_edge (e1->src, bb, EDGE_TRUE_VALUE);
+ e1->flags = EDGE_FALSE_VALUE;
+ e3->probability = prob;
+ e1->probability = prob.invert ();
+ set_immediate_dominator (CDI_DOMINATORS, bb, e1->src);
+ set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
+ edge_true = make_edge (bb, e2->dest, EDGE_FALLTHRU);
+ edge_false = e2;
+ m_gsi = gsi_after_labels (bb);
+}
+
+/* Emit a half diamond with full diamond in it
+ if (COND1)
+ |\
+ | \
+ | \
+ | if (COND2)
+ | / \
+ | / \
+ |new_bb1 new_bb2
+ | | /
+ \ | /
+ \ | /
+ \ | /
+ \|/
+ or if (COND1) { if (COND2) new_bb2; else new_bb1; }
+ PROB1 is the probability that the condition 1 is true.
+ PROB2 is the probability that the condition 2 is true.
+ Updates m_gsi to start of new_bb1.
+ Sets EDGE_TRUE_TRUE to edge from new_bb2 to successor,
+ EDGE_TRUE_FALSE to edge from new_bb1 to successor and
+ EDGE_FALSE to the EDGE_FALSE_VALUE edge from if (COND1) bb.
+ If COND2 is NULL, this is equivalent to
+ if_then (COND1, PROB1, EDGE_TRUE_FALSE, EDGE_FALSE);
+ EDGE_TRUE_TRUE = NULL; */
+
+void
+bitint_large_huge::if_then_if_then_else (gimple *cond1, gimple *cond2,
+ profile_probability prob1,
+ profile_probability prob2,
+ edge &edge_true_true,
+ edge &edge_true_false,
+ edge &edge_false)
+{
+ edge e2, e3, e4 = NULL;
+ if_then (cond1, prob1, e2, e3);
+ if (cond2 == NULL)
+ {
+ edge_true_true = NULL;
+ edge_true_false = e2;
+ edge_false = e3;
+ return;
+ }
+ insert_before (cond2);
+ e2 = split_block (gsi_bb (m_gsi), cond2);
+ basic_block bb = create_empty_bb (e2->dest);
+ add_bb_to_loop (bb, e2->dest->loop_father);
+ e4 = make_edge (e2->src, bb, EDGE_TRUE_VALUE);
+ set_immediate_dominator (CDI_DOMINATORS, bb, e2->src);
+ e4->probability = prob2;
+ e2->flags = EDGE_FALSE_VALUE;
+ e2->probability = prob2.invert ();
+ e4 = make_edge (bb, e3->dest, EDGE_FALLTHRU);
+ e2 = find_edge (e2->dest, e3->dest);
+ edge_true_true = e4;
+ edge_true_false = e2;
+ edge_false = e3;
+ m_gsi = gsi_after_labels (e2->src);
+}
+
+/* Emit code to access limb IDX from OP. */
+
+tree
+bitint_large_huge::handle_operand (tree op, tree idx)
+{
+ switch (TREE_CODE (op))
+ {
+ case SSA_NAME:
+ if (m_names == NULL
+ || !bitmap_bit_p (m_names, SSA_NAME_VERSION (op)))
+ {
+ if (SSA_NAME_IS_DEFAULT_DEF (op))
+ {
+ if (m_first)
+ {
+ tree v = create_tmp_reg (m_limb_type);
+ if (SSA_NAME_VAR (op) && VAR_P (SSA_NAME_VAR (op)))
+ {
+ DECL_NAME (v) = DECL_NAME (SSA_NAME_VAR (op));
+ DECL_SOURCE_LOCATION (v)
+ = DECL_SOURCE_LOCATION (SSA_NAME_VAR (op));
+ }
+ v = get_or_create_ssa_default_def (cfun, v);
+ m_data.safe_push (v);
+ }
+ tree ret = m_data[m_data_cnt];
+ m_data_cnt++;
+ if (tree_fits_uhwi_p (idx))
+ {
+ tree type = limb_access_type (TREE_TYPE (op), idx);
+ ret = add_cast (type, ret);
+ }
+ return ret;
+ }
+ location_t loc_save = m_loc;
+ m_loc = gimple_location (SSA_NAME_DEF_STMT (op));
+ tree ret = handle_stmt (SSA_NAME_DEF_STMT (op), idx);
+ m_loc = loc_save;
+ return ret;
+ }
+ int p;
+ gimple *g;
+ tree t;
+ p = var_to_partition (m_map, op);
+ gcc_assert (m_vars[p] != NULL_TREE);
+ t = limb_access (TREE_TYPE (op), m_vars[p], idx, false);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t);
+ insert_before (g);
+ t = gimple_assign_lhs (g);
+ if (m_first
+ && m_single_use_names
+ && m_vars[p] != m_lhs
+ && m_after_stmt
+ && bitmap_bit_p (m_single_use_names, SSA_NAME_VERSION (op)))
+ {
+ tree clobber = build_clobber (TREE_TYPE (m_vars[p]), CLOBBER_EOL);
+ g = gimple_build_assign (m_vars[p], clobber);
+ gimple_stmt_iterator gsi = gsi_for_stmt (m_after_stmt);
+ gsi_insert_after (&gsi, g, GSI_SAME_STMT);
+ }
+ return t;
+ case INTEGER_CST:
+ if (tree_fits_uhwi_p (idx))
+ {
+ tree c, type = limb_access_type (TREE_TYPE (op), idx);
+ unsigned HOST_WIDE_INT i = tree_to_uhwi (idx);
+ if (m_first)
+ {
+ m_data.safe_push (NULL_TREE);
+ m_data.safe_push (NULL_TREE);
+ }
+ if (limb_prec != HOST_BITS_PER_WIDE_INT)
+ {
+ wide_int w = wi::rshift (wi::to_wide (op), i * limb_prec,
+ TYPE_SIGN (TREE_TYPE (op)));
+ c = wide_int_to_tree (type,
+ wide_int::from (w, TYPE_PRECISION (type),
+ UNSIGNED));
+ }
+ else if (i >= TREE_INT_CST_EXT_NUNITS (op))
+ c = build_int_cst (type,
+ tree_int_cst_sgn (op) < 0 ? -1 : 0);
+ else
+ c = build_int_cst (type, TREE_INT_CST_ELT (op, i));
+ m_data_cnt += 2;
+ return c;
+ }
+ if (m_first
+ || (m_data[m_data_cnt] == NULL_TREE
+ && m_data[m_data_cnt + 1] == NULL_TREE))
+ {
+ unsigned int prec = TYPE_PRECISION (TREE_TYPE (op));
+ unsigned int rem = prec % (2 * limb_prec);
+ int ext;
+ unsigned min_prec = bitint_min_cst_precision (op, ext);
+ if (m_first)
+ {
+ m_data.safe_push (NULL_TREE);
+ m_data.safe_push (NULL_TREE);
+ }
+ if (integer_zerop (op))
+ {
+ tree c = build_zero_cst (m_limb_type);
+ m_data[m_data_cnt] = c;
+ m_data[m_data_cnt + 1] = c;
+ }
+ else if (integer_all_onesp (op))
+ {
+ tree c = build_all_ones_cst (m_limb_type);
+ m_data[m_data_cnt] = c;
+ m_data[m_data_cnt + 1] = c;
+ }
+ else if (m_upwards_2limb && min_prec <= (unsigned) limb_prec)
+ {
+ /* Single limb constant. Use a phi with that limb from
+ the preheader edge and 0 or -1 constant from the other edge
+ and for the second limb in the loop. */
+ tree out;
+ gcc_assert (m_first);
+ m_data.pop ();
+ m_data.pop ();
+ prepare_data_in_out (fold_convert (m_limb_type, op), idx, &out);
+ g = gimple_build_assign (m_data[m_data_cnt + 1],
+ build_int_cst (m_limb_type, ext));
+ insert_before (g);
+ m_data[m_data_cnt + 1] = gimple_assign_rhs1 (g);
+ }
+ else if (min_prec > prec - rem - 2 * limb_prec)
+ {
+ /* Constant which has enough significant bits that it isn't
+ worth trying to save .rodata space by extending from smaller
+ number. */
+ tree type;
+ if (m_var_msb)
+ type = TREE_TYPE (op);
+ else
+ /* If we have a guarantee the most significant partial limb
+ (if any) will be only accessed through handle_operand
+ with INTEGER_CST idx, we don't need to include the partial
+ limb in .rodata. */
+ type = build_bitint_type (prec - rem, 1);
+ tree c = tree_output_constant_def (fold_convert (type, op));
+ m_data[m_data_cnt] = c;
+ m_data[m_data_cnt + 1] = NULL_TREE;
+ }
+ else if (m_upwards_2limb)
+ {
+ /* Constant with smaller number of bits. Trade conditional
+ code for .rodata space by extending from smaller number. */
+ min_prec = CEIL (min_prec, 2 * limb_prec) * (2 * limb_prec);
+ tree type = build_bitint_type (min_prec, 1);
+ tree c = tree_output_constant_def (fold_convert (type, op));
+ tree idx2 = make_ssa_name (sizetype);
+ g = gimple_build_assign (idx2, PLUS_EXPR, idx, size_one_node);
+ insert_before (g);
+ g = gimple_build_cond (LT_EXPR, idx,
+ size_int (min_prec / limb_prec),
+ NULL_TREE, NULL_TREE);
+ edge edge_true, edge_false;
+ if_then (g, (min_prec >= (prec - rem) / 2
+ ? profile_probability::likely ()
+ : profile_probability::unlikely ()),
+ edge_true, edge_false);
+ tree c1 = limb_access (TREE_TYPE (op), c, idx, false);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (c1)), c1);
+ insert_before (g);
+ c1 = gimple_assign_lhs (g);
+ tree c2 = limb_access (TREE_TYPE (op), c, idx2, false);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (c2)), c2);
+ insert_before (g);
+ c2 = gimple_assign_lhs (g);
+ tree c3 = build_int_cst (m_limb_type, ext);
+ m_gsi = gsi_after_labels (edge_true->dest);
+ m_data[m_data_cnt] = make_ssa_name (m_limb_type);
+ m_data[m_data_cnt + 1] = make_ssa_name (m_limb_type);
+ gphi *phi = create_phi_node (m_data[m_data_cnt],
+ edge_true->dest);
+ add_phi_arg (phi, c1, edge_true, UNKNOWN_LOCATION);
+ add_phi_arg (phi, c3, edge_false, UNKNOWN_LOCATION);
+ phi = create_phi_node (m_data[m_data_cnt + 1], edge_true->dest);
+ add_phi_arg (phi, c2, edge_true, UNKNOWN_LOCATION);
+ add_phi_arg (phi, c3, edge_false, UNKNOWN_LOCATION);
+ }
+ else
+ {
+ /* Constant with smaller number of bits. Trade conditional
+ code for .rodata space by extending from smaller number.
+ Version for loops with random access to the limbs or
+ downwards loops. */
+ min_prec = CEIL (min_prec, limb_prec) * limb_prec;
+ tree c;
+ if (min_prec <= (unsigned) limb_prec)
+ c = fold_convert (m_limb_type, op);
+ else
+ {
+ tree type = build_bitint_type (min_prec, 1);
+ c = tree_output_constant_def (fold_convert (type, op));
+ }
+ m_data[m_data_cnt] = c;
+ m_data[m_data_cnt + 1] = integer_type_node;
+ }
+ t = m_data[m_data_cnt];
+ if (m_data[m_data_cnt + 1] == NULL_TREE)
+ {
+ t = limb_access (TREE_TYPE (op), t, idx, false);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t);
+ insert_before (g);
+ t = gimple_assign_lhs (g);
+ }
+ }
+ else if (m_data[m_data_cnt + 1] == NULL_TREE)
+ {
+ t = limb_access (TREE_TYPE (op), m_data[m_data_cnt], idx, false);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t);
+ insert_before (g);
+ t = gimple_assign_lhs (g);
+ }
+ else
+ t = m_data[m_data_cnt + 1];
+ if (m_data[m_data_cnt + 1] == integer_type_node)
+ {
+ unsigned int prec = TYPE_PRECISION (TREE_TYPE (op));
+ unsigned rem = prec % (2 * limb_prec);
+ int ext = tree_int_cst_sgn (op) < 0 ? -1 : 0;
+ tree c = m_data[m_data_cnt];
+ unsigned min_prec = TYPE_PRECISION (TREE_TYPE (c));
+ g = gimple_build_cond (LT_EXPR, idx,
+ size_int (min_prec / limb_prec),
+ NULL_TREE, NULL_TREE);
+ edge edge_true, edge_false;
+ if_then (g, (min_prec >= (prec - rem) / 2
+ ? profile_probability::likely ()
+ : profile_probability::unlikely ()),
+ edge_true, edge_false);
+ if (min_prec > (unsigned) limb_prec)
+ {
+ c = limb_access (TREE_TYPE (op), c, idx, false);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (c)), c);
+ insert_before (g);
+ c = gimple_assign_lhs (g);
+ }
+ tree c2 = build_int_cst (m_limb_type, ext);
+ m_gsi = gsi_after_labels (edge_true->dest);
+ t = make_ssa_name (m_limb_type);
+ gphi *phi = create_phi_node (t, edge_true->dest);
+ add_phi_arg (phi, c, edge_true, UNKNOWN_LOCATION);
+ add_phi_arg (phi, c2, edge_false, UNKNOWN_LOCATION);
+ }
+ m_data_cnt += 2;
+ return t;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Helper method, add a PHI node with VAL from preheader edge if
+ inside of a loop and m_first. Keep state in a pair of m_data
+ elements. */
+
+tree
+bitint_large_huge::prepare_data_in_out (tree val, tree idx, tree *data_out)
+{
+ if (!m_first)
+ {
+ *data_out = tree_fits_uhwi_p (idx) ? NULL_TREE : m_data[m_data_cnt + 1];
+ return m_data[m_data_cnt];
+ }
+
+ *data_out = NULL_TREE;
+ if (tree_fits_uhwi_p (idx))
+ {
+ m_data.safe_push (val);
+ m_data.safe_push (NULL_TREE);
+ return val;
+ }
+
+ tree in = make_ssa_name (TREE_TYPE (val));
+ gphi *phi = create_phi_node (in, m_bb);
+ edge e1 = find_edge (m_preheader_bb, m_bb);
+ edge e2 = EDGE_PRED (m_bb, 0);
+ if (e1 == e2)
+ e2 = EDGE_PRED (m_bb, 1);
+ add_phi_arg (phi, val, e1, UNKNOWN_LOCATION);
+ tree out = make_ssa_name (TREE_TYPE (val));
+ add_phi_arg (phi, out, e2, UNKNOWN_LOCATION);
+ m_data.safe_push (in);
+ m_data.safe_push (out);
+ return in;
+}
+
+/* Return VAL cast to TYPE. If VAL is INTEGER_CST, just
+ convert it without emitting any code, otherwise emit
+ the conversion statement before the current location. */
+
+tree
+bitint_large_huge::add_cast (tree type, tree val)
+{
+ if (TREE_CODE (val) == INTEGER_CST)
+ return fold_convert (type, val);
+
+ tree lhs = make_ssa_name (type);
+ gimple *g = gimple_build_assign (lhs, NOP_EXPR, val);
+ insert_before (g);
+ return lhs;
+}
+
+/* Helper of handle_stmt method, handle PLUS_EXPR or MINUS_EXPR. */
+
+tree
+bitint_large_huge::handle_plus_minus (tree_code code, tree rhs1, tree rhs2,
+ tree idx)
+{
+ tree lhs, data_out, ctype;
+ tree rhs1_type = TREE_TYPE (rhs1);
+ gimple *g;
+ tree data_in = prepare_data_in_out (build_zero_cst (m_limb_type), idx,
+ &data_out);
+
+ if (optab_handler (code == PLUS_EXPR ? uaddc5_optab : usubc5_optab,
+ TYPE_MODE (m_limb_type)) != CODE_FOR_nothing)
+ {
+ ctype = build_complex_type (m_limb_type);
+ if (!types_compatible_p (rhs1_type, m_limb_type))
+ {
+ if (!TYPE_UNSIGNED (rhs1_type))
+ {
+ tree type = unsigned_type_for (rhs1_type);
+ rhs1 = add_cast (type, rhs1);
+ rhs2 = add_cast (type, rhs2);
+ }
+ rhs1 = add_cast (m_limb_type, rhs1);
+ rhs2 = add_cast (m_limb_type, rhs2);
+ }
+ lhs = make_ssa_name (ctype);
+ g = gimple_build_call_internal (code == PLUS_EXPR
+ ? IFN_UADDC : IFN_USUBC,
+ 3, rhs1, rhs2, data_in);
+ gimple_call_set_lhs (g, lhs);
+ insert_before (g);
+ if (data_out == NULL_TREE)
+ data_out = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (data_out, IMAGPART_EXPR,
+ build1 (IMAGPART_EXPR, m_limb_type, lhs));
+ insert_before (g);
+ }
+ else if (types_compatible_p (rhs1_type, m_limb_type))
+ {
+ ctype = build_complex_type (m_limb_type);
+ lhs = make_ssa_name (ctype);
+ g = gimple_build_call_internal (code == PLUS_EXPR
+ ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
+ 2, rhs1, rhs2);
+ gimple_call_set_lhs (g, lhs);
+ insert_before (g);
+ if (data_out == NULL_TREE)
+ data_out = make_ssa_name (m_limb_type);
+ if (!integer_zerop (data_in))
+ {
+ rhs1 = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (rhs1, REALPART_EXPR,
+ build1 (REALPART_EXPR, m_limb_type, lhs));
+ insert_before (g);
+ rhs2 = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (rhs2, IMAGPART_EXPR,
+ build1 (IMAGPART_EXPR, m_limb_type, lhs));
+ insert_before (g);
+ lhs = make_ssa_name (ctype);
+ g = gimple_build_call_internal (code == PLUS_EXPR
+ ? IFN_ADD_OVERFLOW
+ : IFN_SUB_OVERFLOW,
+ 2, rhs1, data_in);
+ gimple_call_set_lhs (g, lhs);
+ insert_before (g);
+ data_in = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (data_in, IMAGPART_EXPR,
+ build1 (IMAGPART_EXPR, m_limb_type, lhs));
+ insert_before (g);
+ g = gimple_build_assign (data_out, PLUS_EXPR, rhs2, data_in);
+ insert_before (g);
+ }
+ else
+ {
+ g = gimple_build_assign (data_out, IMAGPART_EXPR,
+ build1 (IMAGPART_EXPR, m_limb_type, lhs));
+ insert_before (g);
+ }
+ }
+ else
+ {
+ tree in = add_cast (rhs1_type, data_in);
+ lhs = make_ssa_name (rhs1_type);
+ g = gimple_build_assign (lhs, code, rhs1, rhs2);
+ insert_before (g);
+ rhs1 = make_ssa_name (rhs1_type);
+ g = gimple_build_assign (rhs1, code, lhs, in);
+ insert_before (g);
+ m_data[m_data_cnt] = NULL_TREE;
+ m_data_cnt += 2;
+ return rhs1;
+ }
+ rhs1 = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (rhs1, REALPART_EXPR,
+ build1 (REALPART_EXPR, m_limb_type, lhs));
+ insert_before (g);
+ if (!types_compatible_p (rhs1_type, m_limb_type))
+ rhs1 = add_cast (rhs1_type, rhs1);
+ m_data[m_data_cnt] = data_out;
+ m_data_cnt += 2;
+ return rhs1;
+}
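
The net per-limb effect of all three branches above is an ordinary add (or
subtract) with carry; roughly, for r = a + b one iteration computes the
equivalent of this illustrative C (not the emitted GIMPLE):

  unsigned long sum = a_limb + carry_in;          /* may wrap */
  unsigned long c1 = sum < carry_in;              /* carry from the first add */
  unsigned long r_limb = sum + b_limb;
  unsigned long carry_out = c1 + (r_limb < sum);  /* always 0 or 1 */

with carry_in/carry_out flowing between iterations through the
data_in/data_out pair.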
+
+/* Helper function for handle_stmt method, handle LSHIFT_EXPR by
+ count in [0, limb_prec - 1] range. */
+
+tree
+bitint_large_huge::handle_lshift (tree rhs1, tree rhs2, tree idx)
+{
+ unsigned HOST_WIDE_INT cnt = tree_to_uhwi (rhs2);
+ gcc_checking_assert (cnt < (unsigned) limb_prec);
+ if (cnt == 0)
+ return rhs1;
+
+ tree lhs, data_out, rhs1_type = TREE_TYPE (rhs1);
+ gimple *g;
+ tree data_in = prepare_data_in_out (build_zero_cst (m_limb_type), idx,
+ &data_out);
+
+ if (!integer_zerop (data_in))
+ {
+ lhs = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (lhs, RSHIFT_EXPR, data_in,
+ build_int_cst (unsigned_type_node,
+ limb_prec - cnt));
+ insert_before (g);
+ if (!types_compatible_p (rhs1_type, m_limb_type))
+ lhs = add_cast (rhs1_type, lhs);
+ data_in = lhs;
+ }
+ if (types_compatible_p (rhs1_type, m_limb_type))
+ {
+ if (data_out == NULL_TREE)
+ data_out = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (data_out, rhs1);
+ insert_before (g);
+ }
+ if (cnt < (unsigned) TYPE_PRECISION (rhs1_type))
+ {
+ lhs = make_ssa_name (rhs1_type);
+ g = gimple_build_assign (lhs, LSHIFT_EXPR, rhs1, rhs2);
+ insert_before (g);
+ if (!integer_zerop (data_in))
+ {
+ rhs1 = lhs;
+ lhs = make_ssa_name (rhs1_type);
+ g = gimple_build_assign (lhs, BIT_IOR_EXPR, rhs1, data_in);
+ insert_before (g);
+ }
+ }
+ else
+ lhs = data_in;
+ m_data[m_data_cnt] = data_out;
+ m_data_cnt += 2;
+ return lhs;
+}
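
In other words, for a shift count CNT in [1, limb_prec - 1] the code above
computes, per limb and roughly speaking,
r[i] = (a[i] << CNT) | (a[i - 1] >> (limb_prec - CNT)), with the previous
limb a[i - 1] carried between iterations in the data_in/data_out pair.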
+
+/* Helper function for handle_stmt method, handle an integral
+ to integral conversion. */
+
+tree
+bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx)
+{
+ tree rhs_type = TREE_TYPE (rhs1);
+ gimple *g;
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && TREE_CODE (lhs_type) == BITINT_TYPE
+ && TREE_CODE (rhs_type) == BITINT_TYPE
+ && bitint_precision_kind (lhs_type) >= bitint_prec_large
+ && bitint_precision_kind (rhs_type) >= bitint_prec_large)
+ {
+ if (TYPE_PRECISION (rhs_type) >= TYPE_PRECISION (lhs_type)
+ /* If lhs has bigger precision than rhs, we can use
+ the simple case only if there is a guarantee that
+ the most significant limb is handled in straight
+ line code. If m_var_msb (on left shifts) or
+ if m_upwards_2limb * limb_prec is equal to
+ lhs precision that is not the case. */
+ || (!m_var_msb
+ && tree_int_cst_equal (TYPE_SIZE (rhs_type),
+ TYPE_SIZE (lhs_type))
+ && (!m_upwards_2limb
+ || (m_upwards_2limb * limb_prec
+ < TYPE_PRECISION (lhs_type)))))
+ {
+ rhs1 = handle_operand (rhs1, idx);
+ if (tree_fits_uhwi_p (idx))
+ {
+ tree type = limb_access_type (lhs_type, idx);
+ if (!types_compatible_p (type, TREE_TYPE (rhs1)))
+ rhs1 = add_cast (type, rhs1);
+ }
+ return rhs1;
+ }
+ tree t;
+ /* Indexes lower than this don't need any special processing. */
+ unsigned low = ((unsigned) TYPE_PRECISION (rhs_type)
+ - !TYPE_UNSIGNED (rhs_type)) / limb_prec;
+ /* Indexes >= this always contain an extension. */
+ unsigned high = CEIL ((unsigned) TYPE_PRECISION (rhs_type), limb_prec);
+ bool save_first = m_first;
+ if (m_first)
+ {
+ m_data.safe_push (NULL_TREE);
+ m_data.safe_push (NULL_TREE);
+ m_data.safe_push (NULL_TREE);
+ if (TYPE_UNSIGNED (rhs_type))
+ /* No need to keep state between iterations. */
+ ;
+ else if (m_upwards && !m_upwards_2limb)
+ /* We need to keep state between iterations, but
+ not within any loop, everything is straight line
+ code with only increasing indexes. */
+ ;
+ else if (!m_upwards_2limb)
+ {
+ unsigned save_data_cnt = m_data_cnt;
+ gimple_stmt_iterator save_gsi = m_gsi;
+ m_gsi = m_init_gsi;
+ if (gsi_end_p (m_gsi))
+ m_gsi = gsi_after_labels (gsi_bb (m_gsi));
+ else
+ gsi_next (&m_gsi);
+ m_data_cnt = save_data_cnt + 3;
+ t = handle_operand (rhs1, size_int (low));
+ m_first = false;
+ m_data[save_data_cnt + 2]
+ = build_int_cst (NULL_TREE, m_data_cnt);
+ m_data_cnt = save_data_cnt;
+ t = add_cast (signed_type_for (m_limb_type), t);
+ tree lpm1 = build_int_cst (unsigned_type_node, limb_prec - 1);
+ tree n = make_ssa_name (TREE_TYPE (t));
+ g = gimple_build_assign (n, RSHIFT_EXPR, t, lpm1);
+ insert_before (g);
+ m_data[save_data_cnt + 1] = add_cast (m_limb_type, n);
+ m_gsi = save_gsi;
+ }
+ else if (m_upwards_2limb * limb_prec < TYPE_PRECISION (rhs_type))
+ /* We need to keep state between iterations, but
+ fortunately not within the loop, only afterwards. */
+ ;
+ else
+ {
+ tree out;
+ m_data.truncate (m_data_cnt);
+ prepare_data_in_out (build_zero_cst (m_limb_type), idx, &out);
+ m_data.safe_push (NULL_TREE);
+ }
+ }
+
+ unsigned save_data_cnt = m_data_cnt;
+ m_data_cnt += 3;
+ if (!tree_fits_uhwi_p (idx))
+ {
+ if (m_upwards_2limb
+ && (m_upwards_2limb * limb_prec
+ <= ((unsigned) TYPE_PRECISION (rhs_type)
+ - !TYPE_UNSIGNED (rhs_type))))
+ {
+ rhs1 = handle_operand (rhs1, idx);
+ if (m_first)
+ m_data[save_data_cnt + 2]
+ = build_int_cst (NULL_TREE, m_data_cnt);
+ m_first = save_first;
+ return rhs1;
+ }
+ bool single_comparison
+ = low == high || (m_upwards_2limb && (low & 1) == m_first);
+ g = gimple_build_cond (single_comparison ? LT_EXPR : LE_EXPR,
+ idx, size_int (low), NULL_TREE, NULL_TREE);
+ edge edge_true_true, edge_true_false, edge_false;
+ if_then_if_then_else (g, (single_comparison ? NULL
+ : gimple_build_cond (EQ_EXPR, idx,
+ size_int (low),
+ NULL_TREE,
+ NULL_TREE)),
+ profile_probability::likely (),
+ profile_probability::unlikely (),
+ edge_true_true, edge_true_false, edge_false);
+ bool save_cast_conditional = m_cast_conditional;
+ m_cast_conditional = true;
+ m_bitfld_load = 0;
+ tree t1 = handle_operand (rhs1, idx), t2 = NULL_TREE;
+ if (m_first)
+ m_data[save_data_cnt + 2]
+ = build_int_cst (NULL_TREE, m_data_cnt);
+ tree ext = NULL_TREE;
+ tree bitfld = NULL_TREE;
+ if (!single_comparison)
+ {
+ m_gsi = gsi_after_labels (edge_true_true->src);
+ m_first = false;
+ m_data_cnt = save_data_cnt + 3;
+ if (m_bitfld_load)
+ {
+ bitfld = m_data[m_bitfld_load];
+ m_data[m_bitfld_load] = m_data[m_bitfld_load + 2];
+ m_bitfld_load = 0;
+ }
+ t2 = handle_operand (rhs1, size_int (low));
+ if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (t2)))
+ t2 = add_cast (m_limb_type, t2);
+ if (!TYPE_UNSIGNED (rhs_type) && m_upwards_2limb)
+ {
+ ext = add_cast (signed_type_for (m_limb_type), t2);
+ tree lpm1 = build_int_cst (unsigned_type_node,
+ limb_prec - 1);
+ tree n = make_ssa_name (TREE_TYPE (ext));
+ g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1);
+ insert_before (g);
+ ext = add_cast (m_limb_type, n);
+ }
+ }
+ tree t3;
+ if (TYPE_UNSIGNED (rhs_type))
+ t3 = build_zero_cst (m_limb_type);
+ else if (m_upwards_2limb && (save_first || ext != NULL_TREE))
+ t3 = m_data[save_data_cnt];
+ else
+ t3 = m_data[save_data_cnt + 1];
+ m_gsi = gsi_after_labels (edge_true_false->dest);
+ t = make_ssa_name (m_limb_type);
+ gphi *phi = create_phi_node (t, edge_true_false->dest);
+ add_phi_arg (phi, t1, edge_true_false, UNKNOWN_LOCATION);
+ add_phi_arg (phi, t3, edge_false, UNKNOWN_LOCATION);
+ if (edge_true_true)
+ add_phi_arg (phi, t2, edge_true_true, UNKNOWN_LOCATION);
+ if (ext)
+ {
+ tree t4 = make_ssa_name (m_limb_type);
+ phi = create_phi_node (t4, edge_true_false->dest);
+ add_phi_arg (phi, build_zero_cst (m_limb_type), edge_true_false,
+ UNKNOWN_LOCATION);
+ add_phi_arg (phi, m_data[save_data_cnt], edge_false,
+ UNKNOWN_LOCATION);
+ add_phi_arg (phi, ext, edge_true_true, UNKNOWN_LOCATION);
+ g = gimple_build_assign (m_data[save_data_cnt + 1], t4);
+ insert_before (g);
+ }
+ if (m_bitfld_load)
+ {
+ tree t4;
+ if (!m_first)
+ t4 = m_data[m_bitfld_load + 1];
+ else
+ t4 = make_ssa_name (m_limb_type);
+ phi = create_phi_node (t4, edge_true_false->dest);
+ add_phi_arg (phi,
+ edge_true_true ? bitfld : m_data[m_bitfld_load],
+ edge_true_false, UNKNOWN_LOCATION);
+ add_phi_arg (phi, m_data[m_bitfld_load + 2],
+ edge_false, UNKNOWN_LOCATION);
+ if (edge_true_true)
+ add_phi_arg (phi, m_data[m_bitfld_load], edge_true_true,
+ UNKNOWN_LOCATION);
+ m_data[m_bitfld_load] = t4;
+ m_data[m_bitfld_load + 2] = t4;
+ m_bitfld_load = 0;
+ }
+ m_cast_conditional = save_cast_conditional;
+ m_first = save_first;
+ return t;
+ }
+ else
+ {
+ if (tree_to_uhwi (idx) < low)
+ {
+ t = handle_operand (rhs1, idx);
+ if (m_first)
+ m_data[save_data_cnt + 2]
+ = build_int_cst (NULL_TREE, m_data_cnt);
+ }
+ else if (tree_to_uhwi (idx) < high)
+ {
+ t = handle_operand (rhs1, size_int (low));
+ if (m_first)
+ m_data[save_data_cnt + 2]
+ = build_int_cst (NULL_TREE, m_data_cnt);
+ if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (t)))
+ t = add_cast (m_limb_type, t);
+ tree ext = NULL_TREE;
+ if (!TYPE_UNSIGNED (rhs_type) && m_upwards)
+ {
+ ext = add_cast (signed_type_for (m_limb_type), t);
+ tree lpm1 = build_int_cst (unsigned_type_node,
+ limb_prec - 1);
+ tree n = make_ssa_name (TREE_TYPE (ext));
+ g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1);
+ insert_before (g);
+ ext = add_cast (m_limb_type, n);
+ m_data[save_data_cnt + 1] = ext;
+ }
+ }
+ else
+ {
+ if (TYPE_UNSIGNED (rhs_type) && m_first)
+ {
+ handle_operand (rhs1, size_zero_node);
+ m_data[save_data_cnt + 2]
+ = build_int_cst (NULL_TREE, m_data_cnt);
+ }
+ else
+ m_data_cnt = tree_to_uhwi (m_data[save_data_cnt + 2]);
+ if (TYPE_UNSIGNED (rhs_type))
+ t = build_zero_cst (m_limb_type);
+ else
+ t = m_data[save_data_cnt + 1];
+ }
+ tree type = limb_access_type (lhs_type, idx);
+ if (!useless_type_conversion_p (type, m_limb_type))
+ t = add_cast (type, t);
+ m_first = save_first;
+ return t;
+ }
+ }
+ else if (TREE_CODE (lhs_type) == BITINT_TYPE
+ && bitint_precision_kind (lhs_type) >= bitint_prec_large
+ && INTEGRAL_TYPE_P (rhs_type))
+ {
+ /* Add support for 3 or more limbs filled in from normal integral
+ type if this assert fails. If no target chooses limb mode smaller
+ than half of largest supported normal integral type, this will not
+ be needed. */
+ gcc_assert (TYPE_PRECISION (rhs_type) <= 2 * limb_prec);
+ tree r1 = NULL_TREE, r2 = NULL_TREE, rext = NULL_TREE;
+ if (m_first)
+ {
+ gimple_stmt_iterator save_gsi = m_gsi;
+ m_gsi = m_init_gsi;
+ if (gsi_end_p (m_gsi))
+ m_gsi = gsi_after_labels (gsi_bb (m_gsi));
+ else
+ gsi_next (&m_gsi);
+ if (TREE_CODE (rhs_type) == BITINT_TYPE
+ && bitint_precision_kind (rhs_type) == bitint_prec_middle)
+ {
+ tree type = NULL_TREE;
+ rhs1 = maybe_cast_middle_bitint (&m_gsi, rhs1, type);
+ rhs_type = TREE_TYPE (rhs1);
+ }
+ r1 = rhs1;
+ if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1)))
+ r1 = add_cast (m_limb_type, rhs1);
+ if (TYPE_PRECISION (rhs_type) > limb_prec)
+ {
+ g = gimple_build_assign (make_ssa_name (rhs_type),
+ RSHIFT_EXPR, rhs1,
+ build_int_cst (unsigned_type_node,
+ limb_prec));
+ insert_before (g);
+ r2 = add_cast (m_limb_type, gimple_assign_lhs (g));
+ }
+ if (TYPE_UNSIGNED (rhs_type))
+ rext = build_zero_cst (m_limb_type);
+ else
+ {
+ rext = add_cast (signed_type_for (m_limb_type), r2 ? r2 : r1);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (rext)),
+ RSHIFT_EXPR, rext,
+ build_int_cst (unsigned_type_node,
+ limb_prec - 1));
+ insert_before (g);
+ rext = add_cast (m_limb_type, gimple_assign_lhs (g));
+ }
+ m_gsi = save_gsi;
+ }
+ tree t;
+ if (m_upwards_2limb)
+ {
+ if (m_first)
+ {
+ tree out1, out2;
+ prepare_data_in_out (r1, idx, &out1);
+ g = gimple_build_assign (m_data[m_data_cnt + 1], rext);
+ insert_before (g);
+ if (TYPE_PRECISION (rhs_type) > limb_prec)
+ {
+ prepare_data_in_out (r2, idx, &out2);
+ g = gimple_build_assign (m_data[m_data_cnt + 3], rext);
+ insert_before (g);
+ m_data.pop ();
+ t = m_data.pop ();
+ m_data[m_data_cnt + 1] = t;
+ }
+ else
+ m_data[m_data_cnt + 1] = rext;
+ m_data.safe_push (rext);
+ t = m_data[m_data_cnt];
+ }
+ else if (!tree_fits_uhwi_p (idx))
+ t = m_data[m_data_cnt + 1];
+ else
+ {
+ tree type = limb_access_type (lhs_type, idx);
+ t = m_data[m_data_cnt + 2];
+ if (!useless_type_conversion_p (type, m_limb_type))
+ t = add_cast (type, t);
+ }
+ m_data_cnt += 3;
+ return t;
+ }
+ else if (m_first)
+ {
+ m_data.safe_push (r1);
+ m_data.safe_push (r2);
+ m_data.safe_push (rext);
+ }
+ if (tree_fits_uhwi_p (idx))
+ {
+ tree type = limb_access_type (lhs_type, idx);
+ if (integer_zerop (idx))
+ t = m_data[m_data_cnt];
+ else if (TYPE_PRECISION (rhs_type) > limb_prec
+ && integer_onep (idx))
+ t = m_data[m_data_cnt + 1];
+ else
+ t = m_data[m_data_cnt + 2];
+ if (!useless_type_conversion_p (type, m_limb_type))
+ t = add_cast (type, t);
+ m_data_cnt += 3;
+ return t;
+ }
+ g = gimple_build_cond (NE_EXPR, idx, size_zero_node,
+ NULL_TREE, NULL_TREE);
+ edge e2, e3, e4 = NULL;
+ if_then (g, profile_probability::likely (), e2, e3);
+ if (m_data[m_data_cnt + 1])
+ {
+ g = gimple_build_cond (EQ_EXPR, idx, size_one_node,
+ NULL_TREE, NULL_TREE);
+ insert_before (g);
+ edge e5 = split_block (gsi_bb (m_gsi), g);
+ e4 = make_edge (e5->src, e2->dest, EDGE_TRUE_VALUE);
+ e2 = find_edge (e5->dest, e2->dest);
+ e4->probability = profile_probability::unlikely ();
+ e5->flags = EDGE_FALSE_VALUE;
+ e5->probability = e4->probability.invert ();
+ }
+ m_gsi = gsi_after_labels (e2->dest);
+ t = make_ssa_name (m_limb_type);
+ gphi *phi = create_phi_node (t, e2->dest);
+ add_phi_arg (phi, m_data[m_data_cnt + 2], e2, UNKNOWN_LOCATION);
+ add_phi_arg (phi, m_data[m_data_cnt], e3, UNKNOWN_LOCATION);
+ if (e4)
+ add_phi_arg (phi, m_data[m_data_cnt + 1], e4, UNKNOWN_LOCATION);
+ m_data_cnt += 3;
+ return t;
+ }
+ return NULL_TREE;
+}
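
For instance (illustrative), widening a 32-bit int x to _BitInt(256)
computes limb 0 as x converted to the limb type and every higher limb as
the extension value, i.e. all ones when x is negative and zero otherwise;
those values are computed once under m_first and the per-limb calls simply
select among them.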
+
+/* Helper function for handle_stmt method, handle a load from memory. */
+
+tree
+bitint_large_huge::handle_load (gimple *stmt, tree idx)
+{
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree rhs_type = TREE_TYPE (rhs1);
+ bool eh = stmt_ends_bb_p (stmt);
+ edge eh_edge = NULL;
+ gimple *g;
+
+ if (eh)
+ {
+ edge_iterator ei;
+ basic_block bb = gimple_bb (stmt);
+
+ FOR_EACH_EDGE (eh_edge, ei, bb->succs)
+ if (eh_edge->flags & EDGE_EH)
+ break;
+ }
+
+ if (TREE_CODE (rhs1) == COMPONENT_REF
+ && DECL_BIT_FIELD_TYPE (TREE_OPERAND (rhs1, 1)))
+ {
+ tree fld = TREE_OPERAND (rhs1, 1);
+ /* For little-endian, we can allow as inputs bit-fields
+ which start at a limb boundary. */
+ gcc_assert (tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld)));
+ if (DECL_OFFSET_ALIGN (fld) >= TYPE_ALIGN (TREE_TYPE (rhs1))
+ && (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % limb_prec) == 0)
+ goto normal_load;
+ /* Even if DECL_FIELD_BIT_OFFSET (fld) is a multiple of BITS_PER_UNIT,
+ handle it normally for now. */
+ if ((tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % BITS_PER_UNIT) == 0)
+ goto normal_load;
+ tree repr = DECL_BIT_FIELD_REPRESENTATIVE (fld);
+ poly_int64 bitoffset;
+ poly_uint64 field_offset, repr_offset;
+ bool var_field_off = false;
+ if (poly_int_tree_p (DECL_FIELD_OFFSET (fld), &field_offset)
+ && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
+ bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
+ else
+ {
+ bitoffset = 0;
+ var_field_off = true;
+ }
+ bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld))
+ - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
+ tree nrhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr),
+ TREE_OPERAND (rhs1, 0), repr,
+ var_field_off ? TREE_OPERAND (rhs1, 2) : NULL_TREE);
+ HOST_WIDE_INT bo = bitoffset.to_constant ();
+ unsigned bo_idx = (unsigned HOST_WIDE_INT) bo / limb_prec;
+ unsigned bo_bit = (unsigned HOST_WIDE_INT) bo % limb_prec;
+ if (m_first)
+ {
+ if (m_upwards)
+ {
+ gimple_stmt_iterator save_gsi = m_gsi;
+ m_gsi = m_init_gsi;
+ if (gsi_end_p (m_gsi))
+ m_gsi = gsi_after_labels (gsi_bb (m_gsi));
+ else
+ gsi_next (&m_gsi);
+ tree t = limb_access (rhs_type, nrhs1, size_int (bo_idx), true);
+ tree iv = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (iv, t);
+ insert_before (g);
+ if (eh)
+ {
+ maybe_duplicate_eh_stmt (g, stmt);
+ if (eh_edge)
+ {
+ edge e = split_block (gsi_bb (m_gsi), g);
+ make_edge (e->src, eh_edge->dest, EDGE_EH)->probability
+ = profile_probability::very_unlikely ();
+ m_init_gsi.bb = e->dest;
+ }
+ }
+ m_gsi = save_gsi;
+ tree out;
+ prepare_data_in_out (iv, idx, &out);
+ out = m_data[m_data_cnt];
+ m_data.safe_push (out);
+ }
+ else
+ {
+ m_data.safe_push (NULL_TREE);
+ m_data.safe_push (NULL_TREE);
+ m_data.safe_push (NULL_TREE);
+ }
+ }
+
+ tree nidx0 = NULL_TREE, nidx1;
+ tree iv = m_data[m_data_cnt];
+ if (m_cast_conditional && iv)
+ {
+ gcc_assert (!m_bitfld_load);
+ m_bitfld_load = m_data_cnt;
+ }
+ if (tree_fits_uhwi_p (idx))
+ {
+ unsigned prec = TYPE_PRECISION (rhs_type);
+ unsigned HOST_WIDE_INT i = tree_to_uhwi (idx);
+ gcc_assert (i * limb_prec < prec);
+ nidx1 = size_int (i + bo_idx + 1);
+ if ((i + 1) * limb_prec > prec)
+ {
+ prec %= limb_prec;
+ if (prec + bo_bit <= (unsigned) limb_prec)
+ nidx1 = NULL_TREE;
+ }
+ if (!iv)
+ nidx0 = size_int (i + bo_idx);
+ }
+ else
+ {
+ if (!iv)
+ {
+ if (bo_idx == 0)
+ nidx0 = idx;
+ else
+ {
+ nidx0 = make_ssa_name (sizetype);
+ g = gimple_build_assign (nidx0, PLUS_EXPR, idx,
+ size_int (bo_idx));
+ insert_before (g);
+ }
+ }
+ nidx1 = make_ssa_name (sizetype);
+ g = gimple_build_assign (nidx1, PLUS_EXPR, idx,
+ size_int (bo_idx + 1));
+ insert_before (g);
+ }
+
+ tree iv2 = NULL_TREE;
+ if (nidx0)
+ {
+ tree t = limb_access (rhs_type, nrhs1, nidx0, true);
+ iv = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (iv, t);
+ insert_before (g);
+ gcc_assert (!eh);
+ }
+ if (nidx1)
+ {
+ bool conditional = m_var_msb && !tree_fits_uhwi_p (idx);
+ unsigned prec = TYPE_PRECISION (rhs_type);
+ if (conditional)
+ {
+ if ((prec % limb_prec) == 0
+ || ((prec % limb_prec) + bo_bit > (unsigned) limb_prec))
+ conditional = false;
+ }
+ edge edge_true = NULL, edge_false = NULL;
+ if (conditional)
+ {
+ g = gimple_build_cond (NE_EXPR, idx,
+ size_int (prec / limb_prec),
+ NULL_TREE, NULL_TREE);
+ if_then (g, profile_probability::likely (),
+ edge_true, edge_false);
+ }
+ tree t = limb_access (rhs_type, nrhs1, nidx1, true);
+ if (m_upwards_2limb
+ && !m_first
+ && !m_bitfld_load
+ && !tree_fits_uhwi_p (idx))
+ iv2 = m_data[m_data_cnt + 1];
+ else
+ iv2 = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (iv2, t);
+ insert_before (g);
+ if (eh)
+ {
+ maybe_duplicate_eh_stmt (g, stmt);
+ if (eh_edge)
+ {
+ edge e = split_block (gsi_bb (m_gsi), g);
+ m_gsi = gsi_after_labels (e->dest);
+ make_edge (e->src, eh_edge->dest, EDGE_EH)->probability
+ = profile_probability::very_unlikely ();
+ }
+ }
+ if (conditional)
+ {
+ tree iv3 = make_ssa_name (m_limb_type);
+ if (eh)
+ edge_true = find_edge (gsi_bb (m_gsi), edge_false->dest);
+ gphi *phi = create_phi_node (iv3, edge_true->dest);
+ add_phi_arg (phi, iv2, edge_true, UNKNOWN_LOCATION);
+ add_phi_arg (phi, build_zero_cst (m_limb_type),
+ edge_false, UNKNOWN_LOCATION);
+ m_gsi = gsi_after_labels (edge_true->dest);
+ }
+ }
+ g = gimple_build_assign (make_ssa_name (m_limb_type), RSHIFT_EXPR,
+ iv, build_int_cst (unsigned_type_node, bo_bit));
+ insert_before (g);
+ iv = gimple_assign_lhs (g);
+ if (iv2)
+ {
+ g = gimple_build_assign (make_ssa_name (m_limb_type), LSHIFT_EXPR,
+ iv2, build_int_cst (unsigned_type_node,
+ limb_prec - bo_bit));
+ insert_before (g);
+ g = gimple_build_assign (make_ssa_name (m_limb_type), BIT_IOR_EXPR,
+ gimple_assign_lhs (g), iv);
+ insert_before (g);
+ iv = gimple_assign_lhs (g);
+ if (m_data[m_data_cnt])
+ m_data[m_data_cnt] = iv2;
+ }
+ if (tree_fits_uhwi_p (idx))
+ {
+ tree atype = limb_access_type (rhs_type, idx);
+ if (!useless_type_conversion_p (atype, TREE_TYPE (iv)))
+ iv = add_cast (atype, iv);
+ }
+ m_data_cnt += 3;
+ return iv;
+ }
+
+normal_load:
+ /* Use write_p = true for loads with EH edges to make
+ sure limb_access doesn't add a cast as separate
+ statement after it. */
+ rhs1 = limb_access (rhs_type, rhs1, idx, eh);
+ tree ret = make_ssa_name (TREE_TYPE (rhs1));
+ g = gimple_build_assign (ret, rhs1);
+ insert_before (g);
+ if (eh)
+ {
+ maybe_duplicate_eh_stmt (g, stmt);
+ if (eh_edge)
+ {
+ edge e = split_block (gsi_bb (m_gsi), g);
+ m_gsi = gsi_after_labels (e->dest);
+ make_edge (e->src, eh_edge->dest, EDGE_EH)->probability
+ = profile_probability::very_unlikely ();
+ }
+ if (tree_fits_uhwi_p (idx))
+ {
+ tree atype = limb_access_type (rhs_type, idx);
+ if (!useless_type_conversion_p (atype, TREE_TYPE (rhs1)))
+ ret = add_cast (atype, ret);
+ }
+ }
+ return ret;
+}
+
+/* Lower a mergeable statement STMT and return limb IDX of its result. */
+
+tree
+bitint_large_huge::handle_stmt (gimple *stmt, tree idx)
+{
+ tree lhs, rhs1, rhs2 = NULL_TREE;
+ gimple *g;
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_ASSIGN:
+ if (gimple_assign_load_p (stmt))
+ return handle_load (stmt, idx);
+ switch (gimple_assign_rhs_code (stmt))
+ {
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ rhs2 = handle_operand (gimple_assign_rhs2 (stmt), idx);
+ /* FALLTHRU */
+ case BIT_NOT_EXPR:
+ rhs1 = handle_operand (gimple_assign_rhs1 (stmt), idx);
+ lhs = make_ssa_name (TREE_TYPE (rhs1));
+ g = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
+ rhs1, rhs2);
+ insert_before (g);
+ return lhs;
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ rhs1 = handle_operand (gimple_assign_rhs1 (stmt), idx);
+ rhs2 = handle_operand (gimple_assign_rhs2 (stmt), idx);
+ return handle_plus_minus (gimple_assign_rhs_code (stmt),
+ rhs1, rhs2, idx);
+ case NEGATE_EXPR:
+ rhs2 = handle_operand (gimple_assign_rhs1 (stmt), idx);
+ rhs1 = build_zero_cst (TREE_TYPE (rhs2));
+ return handle_plus_minus (MINUS_EXPR, rhs1, rhs2, idx);
+ case LSHIFT_EXPR:
+ return handle_lshift (handle_operand (gimple_assign_rhs1 (stmt),
+ idx),
+ gimple_assign_rhs2 (stmt), idx);
+ case SSA_NAME:
+ case INTEGER_CST:
+ return handle_operand (gimple_assign_rhs1 (stmt), idx);
+ CASE_CONVERT:
+ case VIEW_CONVERT_EXPR:
+ return handle_cast (TREE_TYPE (gimple_assign_lhs (stmt)),
+ gimple_assign_rhs1 (stmt), idx);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ gcc_unreachable ();
+}
+
+/* Return the minimum precision of OP at STMT.
+ A positive value N means the operand is zero-extended from N bits,
+ i.e. all bits above the lowest N bits are known to be zero; a negative
+ value -N means the operand is sign-extended from N bits, i.e. all bits
+ above bit N - 1 are copies of that sign bit. */
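+
+/* A couple of illustrative examples (a sketch, not an exhaustive list):
+ if the range query proves OP is in [0, 1000], this returns 10 (bits 10
+ and above are known to be zero); if OP is known to be in [-5, 100], it
+ returns -8 (bits above bit 7 are copies of the sign bit); without usable
+ range information it falls back to TYPE_PRECISION of OP's type, negated
+ for signed types. */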
+
+static int
+range_to_prec (tree op, gimple *stmt)
+{
+ int_range_max r;
+ wide_int w;
+ tree type = TREE_TYPE (op);
+ unsigned int prec = TYPE_PRECISION (type);
+
+ if (!optimize
+ || !get_range_query (cfun)->range_of_expr (r, op, stmt))
+ {
+ if (TYPE_UNSIGNED (type))
+ return prec;
+ else
+ return -prec;
+ }
+
+ if (!TYPE_UNSIGNED (TREE_TYPE (op)))
+ {
+ w = r.lower_bound ();
+ if (wi::neg_p (w))
+ {
+ int min_prec1 = wi::min_precision (w, SIGNED);
+ w = r.upper_bound ();
+ int min_prec2 = wi::min_precision (w, SIGNED);
+ int min_prec = MAX (min_prec1, min_prec2);
+ return MIN (-min_prec, -2);
+ }
+ }
+
+ w = r.upper_bound ();
+ int min_prec = wi::min_precision (w, UNSIGNED);
+ return MAX (min_prec, 1);
+}
+
+/* Return the address of the first limb of OP and write its precision
+ into *PREC. If positive, the operand is zero-extended from that
+ precision; if negative, the operand is sign-extended from -*PREC.
+ If PREC_STORED is NULL, this is the toplevel call; otherwise
+ *PREC_STORED is set to the precision from the innermost call
+ without range optimizations. */
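+
+/* For instance (a sketch, assuming a 64-bit limb on a target like x86_64):
+ for the INTEGER_CST 1000 of a large _BitInt type this returns the address
+ of a limb-sized constant holding 1000 emitted via tree_output_constant_def
+ and sets *PREC to 10, i.e. the operand is zero-extended from 10 bits;
+ for a negative constant *PREC would be negative and the operand
+ sign-extended from -*PREC bits. */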
+
+tree
+bitint_large_huge::handle_operand_addr (tree op, gimple *stmt,
+ int *prec_stored, int *prec)
+{
+ wide_int w;
+ location_t loc_save = m_loc;
+ if ((TREE_CODE (TREE_TYPE (op)) != BITINT_TYPE
+ || bitint_precision_kind (TREE_TYPE (op)) < bitint_prec_large)
+ && TREE_CODE (op) != INTEGER_CST)
+ {
+ do_int:
+ *prec = range_to_prec (op, stmt);
+ bitint_prec_kind kind = bitint_prec_small;
+ gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (op)));
+ if (TREE_CODE (TREE_TYPE (op)) == BITINT_TYPE)
+ kind = bitint_precision_kind (TREE_TYPE (op));
+ if (kind == bitint_prec_middle)
+ {
+ tree type = NULL_TREE;
+ op = maybe_cast_middle_bitint (&m_gsi, op, type);
+ }
+ tree op_type = TREE_TYPE (op);
+ unsigned HOST_WIDE_INT nelts
+ = CEIL (TYPE_PRECISION (op_type), limb_prec);
+ /* Add support for 3 or more limbs filled in from normal
+ integral type if this assert fails. If no target chooses
+ limb mode smaller than half of largest supported normal
+ integral type, this will not be needed. */
+ gcc_assert (nelts <= 2);
+ if (prec_stored)
+ *prec_stored = (TYPE_UNSIGNED (op_type)
+ ? TYPE_PRECISION (op_type)
+ : -TYPE_PRECISION (op_type));
+ if (*prec <= limb_prec && *prec >= -limb_prec)
+ {
+ nelts = 1;
+ if (prec_stored)
+ {
+ if (TYPE_UNSIGNED (op_type))
+ {
+ if (*prec_stored > limb_prec)
+ *prec_stored = limb_prec;
+ }
+ else if (*prec_stored < -limb_prec)
+ *prec_stored = -limb_prec;
+ }
+ }
+ tree atype = build_array_type_nelts (m_limb_type, nelts);
+ tree var = create_tmp_var (atype);
+ tree t1 = op;
+ if (!useless_type_conversion_p (m_limb_type, op_type))
+ t1 = add_cast (m_limb_type, t1);
+ tree v = build4 (ARRAY_REF, m_limb_type, var, size_zero_node,
+ NULL_TREE, NULL_TREE);
+ gimple *g = gimple_build_assign (v, t1);
+ insert_before (g);
+ if (nelts > 1)
+ {
+ tree lp = build_int_cst (unsigned_type_node, limb_prec);
+ g = gimple_build_assign (make_ssa_name (op_type),
+ RSHIFT_EXPR, op, lp);
+ insert_before (g);
+ tree t2 = gimple_assign_lhs (g);
+ t2 = add_cast (m_limb_type, t2);
+ v = build4 (ARRAY_REF, m_limb_type, var, size_one_node,
+ NULL_TREE, NULL_TREE);
+ g = gimple_build_assign (v, t2);
+ insert_before (g);
+ }
+ tree ret = build_fold_addr_expr (var);
+ if (!stmt_ends_bb_p (gsi_stmt (m_gsi)))
+ {
+ tree clobber = build_clobber (atype, CLOBBER_EOL);
+ g = gimple_build_assign (var, clobber);
+ gsi_insert_after (&m_gsi, g, GSI_SAME_STMT);
+ }
+ m_loc = loc_save;
+ return ret;
+ }
+ switch (TREE_CODE (op))
+ {
+ case SSA_NAME:
+ if (m_names == NULL
+ || !bitmap_bit_p (m_names, SSA_NAME_VERSION (op)))
+ {
+ gimple *g = SSA_NAME_DEF_STMT (op);
+ tree ret;
+ m_loc = gimple_location (g);
+ if (gimple_assign_load_p (g))
+ {
+ *prec = range_to_prec (op, NULL);
+ if (prec_stored)
+ *prec_stored = (TYPE_UNSIGNED (TREE_TYPE (op))
+ ? TYPE_PRECISION (TREE_TYPE (op))
+ : -TYPE_PRECISION (TREE_TYPE (op)));
+ ret = build_fold_addr_expr (gimple_assign_rhs1 (g));
+ ret = force_gimple_operand_gsi (&m_gsi, ret, true,
+ NULL_TREE, true, GSI_SAME_STMT);
+ }
+ else if (gimple_code (g) == GIMPLE_NOP)
+ {
+ tree var = create_tmp_var (m_limb_type);
+ TREE_ADDRESSABLE (var) = 1;
+ ret = build_fold_addr_expr (var);
+ if (!stmt_ends_bb_p (gsi_stmt (m_gsi)))
+ {
+ tree clobber = build_clobber (m_limb_type, CLOBBER_EOL);
+ g = gimple_build_assign (var, clobber);
+ gsi_insert_after (&m_gsi, g, GSI_SAME_STMT);
+ }
+ }
+ else
+ {
+ gcc_assert (gimple_assign_cast_p (g));
+ tree rhs1 = gimple_assign_rhs1 (g);
+ bitint_prec_kind kind = bitint_prec_small;
+ gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)));
+ if (TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE)
+ kind = bitint_precision_kind (TREE_TYPE (rhs1));
+ if (kind >= bitint_prec_large)
+ {
+ tree lhs_type = TREE_TYPE (op);
+ tree rhs_type = TREE_TYPE (rhs1);
+ int prec_stored_val = 0;
+ ret = handle_operand_addr (rhs1, g, &prec_stored_val, prec);
+ if (TYPE_PRECISION (lhs_type) > TYPE_PRECISION (rhs_type))
+ {
+ if (TYPE_UNSIGNED (lhs_type)
+ && !TYPE_UNSIGNED (rhs_type))
+ gcc_assert (*prec >= 0 || prec_stored == NULL);
+ }
+ else
+ {
+ if (*prec > 0 && *prec < TYPE_PRECISION (lhs_type))
+ ;
+ else if (TYPE_UNSIGNED (lhs_type))
+ {
+ gcc_assert (*prec > 0
+ || prec_stored_val > 0
+ || (-prec_stored_val
+ >= TYPE_PRECISION (lhs_type)));
+ *prec = TYPE_PRECISION (lhs_type);
+ }
+ else if (*prec < 0 && -*prec < TYPE_PRECISION (lhs_type))
+ ;
+ else
+ *prec = -TYPE_PRECISION (lhs_type);
+ }
+ }
+ else
+ {
+ op = rhs1;
+ stmt = g;
+ goto do_int;
+ }
+ }
+ m_loc = loc_save;
+ return ret;
+ }
+ else
+ {
+ int p = var_to_partition (m_map, op);
+ gcc_assert (m_vars[p] != NULL_TREE);
+ *prec = range_to_prec (op, stmt);
+ if (prec_stored)
+ *prec_stored = (TYPE_UNSIGNED (TREE_TYPE (op))
+ ? TYPE_PRECISION (TREE_TYPE (op))
+ : -TYPE_PRECISION (TREE_TYPE (op)));
+ return build_fold_addr_expr (m_vars[p]);
+ }
+ case INTEGER_CST:
+ unsigned int min_prec, mp;
+ tree type;
+ w = wi::to_wide (op);
+ if (tree_int_cst_sgn (op) >= 0)
+ {
+ min_prec = wi::min_precision (w, UNSIGNED);
+ *prec = MAX (min_prec, 1);
+ }
+ else
+ {
+ min_prec = wi::min_precision (w, SIGNED);
+ *prec = MIN ((int) -min_prec, -2);
+ }
+ mp = CEIL (min_prec, limb_prec) * limb_prec;
+ if (mp >= (unsigned) TYPE_PRECISION (TREE_TYPE (op)))
+ type = TREE_TYPE (op);
+ else
+ type = build_bitint_type (mp, 1);
+ if (TREE_CODE (type) != BITINT_TYPE
+ || bitint_precision_kind (type) == bitint_prec_small)
+ {
+ if (TYPE_PRECISION (type) <= limb_prec)
+ type = m_limb_type;
+ else
+ /* This case is for targets which e.g. have a 64-bit
+ limb but categorize up to 128-bit _BitInts as
+ small. We could use a type like m_limb_type[2] and
+ similar instead to save space. */
+ type = build_bitint_type (mid_min_prec, 1);
+ }
+ if (prec_stored)
+ {
+ if (tree_int_cst_sgn (op) >= 0)
+ *prec_stored = MAX (TYPE_PRECISION (type), 1);
+ else
+ *prec_stored = MIN ((int) -TYPE_PRECISION (type), -2);
+ }
+ op = tree_output_constant_def (fold_convert (type, op));
+ return build_fold_addr_expr (op);
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Helper function: create a loop before the current location,
+ starting with a sizetype induction variable initialized to INIT
+ on the preheader edge. Return the PHI result and set *IDX_NEXT
+ to the SSA_NAME it creates and uses as the PHI argument on the
+ latch edge. */
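+
+/* Roughly the following shape is created (a sketch of the CFG, not
+ literal GIMPLE):
+
+ preheader:
+ ...statements emitted so far...
+ header:
+ idx = PHI <INIT (preheader edge), *IDX_NEXT (latch edge)>
+ ...the caller then emits the loop body, the computation of *IDX_NEXT
+ and a final GIMPLE_COND whose true edge loops back to the header...
+ exit:
+
+ m_gsi is left at the start of the header block so that the caller can
+ fill in the body. */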
+
+tree
+bitint_large_huge::create_loop (tree init, tree *idx_next)
+{
+ if (!gsi_end_p (m_gsi))
+ gsi_prev (&m_gsi);
+ else
+ m_gsi = gsi_last_bb (gsi_bb (m_gsi));
+ edge e1 = split_block (gsi_bb (m_gsi), gsi_stmt (m_gsi));
+ edge e2 = split_block (e1->dest, (gimple *) NULL);
+ edge e3 = make_edge (e1->dest, e1->dest, EDGE_TRUE_VALUE);
+ e3->probability = profile_probability::very_unlikely ();
+ e2->flags = EDGE_FALSE_VALUE;
+ e2->probability = e3->probability.invert ();
+ tree idx = make_ssa_name (sizetype);
+ gphi *phi = create_phi_node (idx, e1->dest);
+ add_phi_arg (phi, init, e1, UNKNOWN_LOCATION);
+ *idx_next = make_ssa_name (sizetype);
+ add_phi_arg (phi, *idx_next, e3, UNKNOWN_LOCATION);
+ m_gsi = gsi_after_labels (e1->dest);
+ m_bb = e1->dest;
+ m_preheader_bb = e1->src;
+ class loop *loop = alloc_loop ();
+ loop->header = e1->dest;
+ add_loop (loop, e1->src->loop_father);
+ return idx;
+}
+
+/* Lower a mergeable or similar large/huge _BitInt statement STMT which can
+ be lowered using iteration from the least significant limb up to the most
+ significant limb. For large _BitInt it is emitted as straight line code
+ before the current location, for huge _BitInt as a loop handling two limbs
+ at once, followed by straight line code for the remaining limbs (at most
+ one full and one partial limb). It can also handle EQ_EXPR/NE_EXPR
+ comparisons, in which case CMP_CODE should be the comparison code and
+ CMP_OP1/CMP_OP2 the comparison operands. */
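+
+/* As a rough illustration (assuming a 64-bit limb and that _BitInt(319)
+ is categorized as huge): for a _BitInt(319) addition end is 4, so the
+ emitted loop handles limbs 0 to 3 two at a time, and the remaining
+ partial most significant limb 4 is handled in straight line code after
+ the loop. */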
+
+tree
+bitint_large_huge::lower_mergeable_stmt (gimple *stmt, tree_code &cmp_code,
+ tree cmp_op1, tree cmp_op2)
+{
+ bool eq_p = cmp_code != ERROR_MARK;
+ tree type;
+ if (eq_p)
+ type = TREE_TYPE (cmp_op1);
+ else
+ type = TREE_TYPE (gimple_assign_lhs (stmt));
+ gcc_assert (TREE_CODE (type) == BITINT_TYPE);
+ bitint_prec_kind kind = bitint_precision_kind (type);
+ gcc_assert (kind >= bitint_prec_large);
+ gimple *g;
+ tree lhs = gimple_get_lhs (stmt);
+ tree rhs1, lhs_type = lhs ? TREE_TYPE (lhs) : NULL_TREE;
+ if (lhs
+ && TREE_CODE (lhs) == SSA_NAME
+ && TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large)
+ {
+ int p = var_to_partition (m_map, lhs);
+ gcc_assert (m_vars[p] != NULL_TREE);
+ m_lhs = lhs = m_vars[p];
+ }
+ unsigned cnt, rem = 0, end = 0, prec = TYPE_PRECISION (type);
+ bool sext = false;
+ tree ext = NULL_TREE, store_operand = NULL_TREE;
+ bool eh = false;
+ basic_block eh_pad = NULL;
+ tree nlhs = NULL_TREE;
+ unsigned HOST_WIDE_INT bo_idx = 0;
+ unsigned HOST_WIDE_INT bo_bit = 0;
+ tree bf_cur = NULL_TREE, bf_next = NULL_TREE;
+ if (gimple_store_p (stmt))
+ {
+ store_operand = gimple_assign_rhs1 (stmt);
+ eh = stmt_ends_bb_p (stmt);
+ if (eh)
+ {
+ edge e;
+ edge_iterator ei;
+ basic_block bb = gimple_bb (stmt);
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->flags & EDGE_EH)
+ {
+ eh_pad = e->dest;
+ break;
+ }
+ }
+ if (TREE_CODE (lhs) == COMPONENT_REF
+ && DECL_BIT_FIELD_TYPE (TREE_OPERAND (lhs, 1)))
+ {
+ tree fld = TREE_OPERAND (lhs, 1);
+ gcc_assert (tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld)));
+ tree repr = DECL_BIT_FIELD_REPRESENTATIVE (fld);
+ poly_int64 bitoffset;
+ poly_uint64 field_offset, repr_offset;
+ if ((tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % BITS_PER_UNIT) == 0)
+ nlhs = lhs;
+ else
+ {
+ bool var_field_off = false;
+ if (poly_int_tree_p (DECL_FIELD_OFFSET (fld), &field_offset)
+ && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
+ bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
+ else
+ {
+ bitoffset = 0;
+ var_field_off = true;
+ }
+ bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld))
+ - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
+ nlhs = build3 (COMPONENT_REF, TREE_TYPE (repr),
+ TREE_OPERAND (lhs, 0), repr,
+ var_field_off
+ ? TREE_OPERAND (lhs, 2) : NULL_TREE);
+ HOST_WIDE_INT bo = bitoffset.to_constant ();
+ bo_idx = (unsigned HOST_WIDE_INT) bo / limb_prec;
+ bo_bit = (unsigned HOST_WIDE_INT) bo % limb_prec;
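+ /* E.g. (assuming a 64-bit limb) a bit-field whose first bit is
+ 163 bits past the start of its representative is accessed with
+ bo_idx == 2 and bo_bit == 35. */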
+ }
+ }
+ }
+ if ((store_operand
+ && TREE_CODE (store_operand) == SSA_NAME
+ && (m_names == NULL
+ || !bitmap_bit_p (m_names, SSA_NAME_VERSION (store_operand)))
+ && gimple_assign_cast_p (SSA_NAME_DEF_STMT (store_operand)))
+ || gimple_assign_cast_p (stmt))
+ {
+ rhs1 = gimple_assign_rhs1 (store_operand
+ ? SSA_NAME_DEF_STMT (store_operand)
+ : stmt);
+ /* Optimize mergeable ops ending with widening cast to _BitInt
+ (or followed by store). We can lower just the limbs of the
+ cast operand and widen afterwards. */
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && (m_names == NULL
+ || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1)))
+ && TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (rhs1)) >= bitint_prec_large
+ && (CEIL ((unsigned) TYPE_PRECISION (TREE_TYPE (rhs1)),
+ limb_prec) < CEIL (prec, limb_prec)
+ || (kind == bitint_prec_huge
+ && TYPE_PRECISION (TREE_TYPE (rhs1)) < prec)))
+ {
+ store_operand = rhs1;
+ prec = TYPE_PRECISION (TREE_TYPE (rhs1));
+ kind = bitint_precision_kind (TREE_TYPE (rhs1));
+ if (!TYPE_UNSIGNED (TREE_TYPE (rhs1)))
+ sext = true;
+ }
+ }
+ tree idx = NULL_TREE, idx_first = NULL_TREE, idx_next = NULL_TREE;
+ if (kind == bitint_prec_large)
+ cnt = CEIL (prec, limb_prec);
+ else
+ {
+ rem = (prec % (2 * limb_prec));
+ end = (prec - rem) / limb_prec;
+ cnt = 2 + CEIL (rem, limb_prec);
+ idx = idx_first = create_loop (size_zero_node, &idx_next);
+ }
+
+ basic_block edge_bb = NULL;
+ if (eq_p)
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ gsi_prev (&gsi);
+ edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
+ edge_bb = e->src;
+ if (kind == bitint_prec_large)
+ {
+ m_gsi = gsi_last_bb (edge_bb);
+ if (!gsi_end_p (m_gsi))
+ gsi_next (&m_gsi);
+ }
+ }
+ else
+ m_after_stmt = stmt;
+ if (kind != bitint_prec_large)
+ m_upwards_2limb = end;
+ m_upwards = true;
+
+ bool separate_ext
+ = (prec != (unsigned) TYPE_PRECISION (type)
+ && (CEIL ((unsigned) TYPE_PRECISION (type), limb_prec)
+ > CEIL (prec, limb_prec)));
+
+ for (unsigned i = 0; i < cnt; i++)
+ {
+ m_data_cnt = 0;
+ if (kind == bitint_prec_large)
+ idx = size_int (i);
+ else if (i >= 2)
+ idx = size_int (end + (i > 2));
+ if (eq_p)
+ {
+ rhs1 = handle_operand (cmp_op1, idx);
+ tree rhs2 = handle_operand (cmp_op2, idx);
+ g = gimple_build_cond (NE_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE);
+ insert_before (g);
+ edge e1 = split_block (gsi_bb (m_gsi), g);
+ e1->flags = EDGE_FALSE_VALUE;
+ edge e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE);
+ e1->probability = profile_probability::unlikely ();
+ e2->probability = e1->probability.invert ();
+ if (i == 0)
+ set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
+ m_gsi = gsi_after_labels (e1->dest);
+ }
+ else
+ {
+ if (store_operand)
+ rhs1 = handle_operand (store_operand, idx);
+ else
+ rhs1 = handle_stmt (stmt, idx);
+ if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1)))
+ rhs1 = add_cast (m_limb_type, rhs1);
+ if (sext && i == cnt - 1)
+ ext = rhs1;
+ tree nidx = idx;
+ if (bo_idx)
+ {
+ if (tree_fits_uhwi_p (idx))
+ nidx = size_int (tree_to_uhwi (idx) + bo_idx);
+ else
+ {
+ nidx = make_ssa_name (sizetype);
+ g = gimple_build_assign (nidx, PLUS_EXPR, idx,
+ size_int (bo_idx));
+ insert_before (g);
+ }
+ }
+ bool done = false;
+ basic_block new_bb = NULL;
+ /* Handle stores into bit-fields. */
+ if (bo_bit)
+ {
+ if (i == 0)
+ {
+ edge e2 = NULL;
+ if (kind != bitint_prec_large)
+ {
+ prepare_data_in_out (build_zero_cst (m_limb_type),
+ idx, &bf_next);
+ bf_next = m_data.pop ();
+ bf_cur = m_data.pop ();
+ g = gimple_build_cond (EQ_EXPR, idx, size_zero_node,
+ NULL_TREE, NULL_TREE);
+ edge edge_true;
+ if_then_else (g, profile_probability::unlikely (),
+ edge_true, e2);
+ new_bb = e2->dest;
+ }
+ tree ftype
+ = build_nonstandard_integer_type (limb_prec - bo_bit, 1);
+ tree bfr = build3 (BIT_FIELD_REF, ftype, unshare_expr (nlhs),
+ bitsize_int (limb_prec - bo_bit),
+ bitsize_int (bo_idx * limb_prec + bo_bit));
+ tree t = add_cast (ftype, rhs1);
+ g = gimple_build_assign (bfr, t);
+ insert_before (g);
+ if (eh)
+ {
+ maybe_duplicate_eh_stmt (g, stmt);
+ if (eh_pad)
+ {
+ edge e = split_block (gsi_bb (m_gsi), g);
+ m_gsi = gsi_after_labels (e->dest);
+ make_edge (e->src, eh_pad, EDGE_EH)->probability
+ = profile_probability::very_unlikely ();
+ }
+ }
+ if (kind == bitint_prec_large)
+ {
+ bf_cur = rhs1;
+ done = true;
+ }
+ else if (e2)
+ m_gsi = gsi_after_labels (e2->src);
+ }
+ if (!done)
+ {
+ tree t1 = make_ssa_name (m_limb_type);
+ tree t2 = make_ssa_name (m_limb_type);
+ tree t3 = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (t1, RSHIFT_EXPR, bf_cur,
+ build_int_cst (unsigned_type_node,
+ limb_prec - bo_bit));
+ insert_before (g);
+ g = gimple_build_assign (t2, LSHIFT_EXPR, rhs1,
+ build_int_cst (unsigned_type_node,
+ bo_bit));
+ insert_before (g);
+ bf_cur = rhs1;
+ g = gimple_build_assign (t3, BIT_IOR_EXPR, t1, t2);
+ insert_before (g);
+ rhs1 = t3;
+ if (bf_next && i == 1)
+ {
+ g = gimple_build_assign (bf_next, bf_cur);
+ insert_before (g);
+ }
+ }
+ }
+ if (!done)
+ {
+ /* Handle bit-field access to partial last limb if needed. */
+ if (nlhs
+ && i == cnt - 1
+ && !separate_ext
+ && tree_fits_uhwi_p (idx))
+ {
+ unsigned int tprec = TYPE_PRECISION (type);
+ unsigned int rprec = tprec % limb_prec;
+ if (rprec + bo_bit < (unsigned) limb_prec)
+ {
+ tree ftype
+ = build_nonstandard_integer_type (rprec + bo_bit, 1);
+ tree bfr = build3 (BIT_FIELD_REF, ftype,
+ unshare_expr (nlhs),
+ bitsize_int (rprec + bo_bit),
+ bitsize_int ((bo_idx
+ + tprec / limb_prec)
+ * limb_prec));
+ tree t = add_cast (ftype, rhs1);
+ g = gimple_build_assign (bfr, t);
+ done = true;
+ bf_cur = NULL_TREE;
+ }
+ else if (rprec + bo_bit == (unsigned) limb_prec)
+ bf_cur = NULL_TREE;
+ }
+ /* Otherwise, stores to any other lhs. */
+ if (!done)
+ {
+ tree l = limb_access (lhs_type, nlhs ? nlhs : lhs,
+ nidx, true);
+ g = gimple_build_assign (l, rhs1);
+ }
+ insert_before (g);
+ if (eh)
+ {
+ maybe_duplicate_eh_stmt (g, stmt);
+ if (eh_pad)
+ {
+ edge e = split_block (gsi_bb (m_gsi), g);
+ m_gsi = gsi_after_labels (e->dest);
+ make_edge (e->src, eh_pad, EDGE_EH)->probability
+ = profile_probability::very_unlikely ();
+ }
+ }
+ if (new_bb)
+ m_gsi = gsi_after_labels (new_bb);
+ }
+ }
+ m_first = false;
+ if (kind == bitint_prec_huge && i <= 1)
+ {
+ if (i == 0)
+ {
+ idx = make_ssa_name (sizetype);
+ g = gimple_build_assign (idx, PLUS_EXPR, idx_first,
+ size_one_node);
+ insert_before (g);
+ }
+ else
+ {
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx_first,
+ size_int (2));
+ insert_before (g);
+ g = gimple_build_cond (NE_EXPR, idx_next, size_int (end),
+ NULL_TREE, NULL_TREE);
+ insert_before (g);
+ if (eq_p)
+ m_gsi = gsi_after_labels (edge_bb);
+ else
+ m_gsi = gsi_for_stmt (stmt);
+ }
+ }
+ }
+
+ if (separate_ext)
+ {
+ if (sext)
+ {
+ ext = add_cast (signed_type_for (m_limb_type), ext);
+ tree lpm1 = build_int_cst (unsigned_type_node,
+ limb_prec - 1);
+ tree n = make_ssa_name (TREE_TYPE (ext));
+ g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1);
+ insert_before (g);
+ ext = add_cast (m_limb_type, n);
+ }
+ else
+ ext = build_zero_cst (m_limb_type);
+ kind = bitint_precision_kind (type);
+ unsigned start = CEIL (prec, limb_prec);
+ prec = TYPE_PRECISION (type);
+ idx = idx_first = idx_next = NULL_TREE;
+ if (prec <= (start + 2 + (bo_bit != 0)) * limb_prec)
+ kind = bitint_prec_large;
+ if (kind == bitint_prec_large)
+ cnt = CEIL (prec, limb_prec) - start;
+ else
+ {
+ rem = prec % limb_prec;
+ end = (prec - rem) / limb_prec;
+ cnt = (bo_bit != 0) + 1 + (rem != 0);
+ }
+ for (unsigned i = 0; i < cnt; i++)
+ {
+ if (kind == bitint_prec_large || (i == 0 && bo_bit != 0))
+ idx = size_int (start + i);
+ else if (i == cnt - 1)
+ idx = size_int (end);
+ else if (i == (bo_bit != 0))
+ idx = create_loop (size_int (start + i), &idx_next);
+ rhs1 = ext;
+ if (bf_cur != NULL_TREE && bf_cur != ext)
+ {
+ tree t1 = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (t1, RSHIFT_EXPR, bf_cur,
+ build_int_cst (unsigned_type_node,
+ limb_prec - bo_bit));
+ insert_before (g);
+ if (integer_zerop (ext))
+ rhs1 = t1;
+ else
+ {
+ tree t2 = make_ssa_name (m_limb_type);
+ rhs1 = make_ssa_name (m_limb_type);
+ g = gimple_build_assign (t2, LSHIFT_EXPR, ext,
+ build_int_cst (unsigned_type_node,
+ bo_bit));
+ insert_before (g);
+ g = gimple_build_assign (rhs1, BIT_IOR_EXPR, t1, t2);
+ insert_before (g);
+ }
+ bf_cur = ext;
+ }
+ tree nidx = idx;
+ if (bo_idx)
+ {
+ if (tree_fits_uhwi_p (idx))
+ nidx = size_int (tree_to_uhwi (idx) + bo_idx);
+ else
+ {
+ nidx = make_ssa_name (sizetype);
+ g = gimple_build_assign (nidx, PLUS_EXPR, idx,
+ size_int (bo_idx));
+ insert_before (g);
+ }
+ }
+ bool done = false;
+ /* Handle bit-field access to partial last limb if needed. */
+ if (nlhs && i == cnt - 1)
+ {
+ unsigned int tprec = TYPE_PRECISION (type);
+ unsigned int rprec = tprec % limb_prec;
+ if (rprec + bo_bit < (unsigned) limb_prec)
+ {
+ tree ftype
+ = build_nonstandard_integer_type (rprec + bo_bit, 1);
+ tree bfr = build3 (BIT_FIELD_REF, ftype,
+ unshare_expr (nlhs),
+ bitsize_int (rprec + bo_bit),
+ bitsize_int ((bo_idx + tprec / limb_prec)
+ * limb_prec));
+ tree t = add_cast (ftype, rhs1);
+ g = gimple_build_assign (bfr, t);
+ done = true;
+ bf_cur = NULL_TREE;
+ }
+ else if (rprec + bo_bit == (unsigned) limb_prec)
+ bf_cur = NULL_TREE;
+ }
+ /* Otherwise, stores to any other lhs. */
+ if (!done)
+ {
+ tree l = limb_access (lhs_type, nlhs ? nlhs : lhs, nidx, true);
+ g = gimple_build_assign (l, rhs1);
+ }
+ insert_before (g);
+ if (eh)
+ {
+ maybe_duplicate_eh_stmt (g, stmt);
+ if (eh_pad)
+ {
+ edge e = split_block (gsi_bb (m_gsi), g);
+ m_gsi = gsi_after_labels (e->dest);
+ make_edge (e->src, eh_pad, EDGE_EH)->probability
+ = profile_probability::very_unlikely ();
+ }
+ }
+ if (kind == bitint_prec_huge && i == (bo_bit != 0))
+ {
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx,
+ size_one_node);
+ insert_before (g);
+ g = gimple_build_cond (NE_EXPR, idx_next, size_int (end),
+ NULL_TREE, NULL_TREE);
+ insert_before (g);
+ m_gsi = gsi_for_stmt (stmt);
+ }
+ }
+ }
+ if (bf_cur != NULL_TREE)
+ {
+ unsigned int tprec = TYPE_PRECISION (type);
+ unsigned int rprec = tprec % limb_prec;
+ tree ftype = build_nonstandard_integer_type (rprec + bo_bit, 1);
+ tree bfr = build3 (BIT_FIELD_REF, ftype, unshare_expr (nlhs),
+ bitsize_int (rprec + bo_bit),
+ bitsize_int ((bo_idx + tprec / limb_prec)
+ * limb_prec));
+ rhs1 = bf_cur;
+ if (bf_cur != ext)
+ {
+ rhs1 = make_ssa_name (TREE_TYPE (rhs1));
+ g = gimple_build_assign (rhs1, RSHIFT_EXPR, bf_cur,
+ build_int_cst (unsigned_type_node,
+ limb_prec - bo_bit));
+ insert_before (g);
+ }
+ rhs1 = add_cast (ftype, rhs1);
+ g = gimple_build_assign (bfr, rhs1);
+ insert_before (g);
+ if (eh)
+ {
+ maybe_duplicate_eh_stmt (g, stmt);
+ if (eh_pad)
+ {
+ edge e = split_block (gsi_bb (m_gsi), g);
+ m_gsi = gsi_after_labels (e->dest);
+ make_edge (e->src, eh_pad, EDGE_EH)->probability
+ = profile_probability::very_unlikely ();
+ }
+ }
+ }
+
+ if (gimple_store_p (stmt))
+ {
+ unlink_stmt_vdef (stmt);
+ release_ssa_name (gimple_vdef (stmt));
+ gsi_remove (&m_gsi, true);
+ }
+ if (eq_p)
+ {
+ lhs = make_ssa_name (boolean_type_node);
+ basic_block bb = gimple_bb (stmt);
+ gphi *phi = create_phi_node (lhs, bb);
+ edge e = find_edge (gsi_bb (m_gsi), bb);
+ unsigned int n = EDGE_COUNT (bb->preds);
+ for (unsigned int i = 0; i < n; i++)
+ {
+ edge e2 = EDGE_PRED (bb, i);
+ add_phi_arg (phi, e == e2 ? boolean_true_node : boolean_false_node,
+ e2, UNKNOWN_LOCATION);
+ }
+ cmp_code = cmp_code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
+ return lhs;
+ }
+ else
+ return NULL_TREE;
+}
+
+/* Handle a large/huge _BitInt comparison statement STMT other than
+ EQ_EXPR/NE_EXPR. CMP_CODE, CMP_OP1 and CMP_OP2 have the same meaning
+ as in lower_mergeable_stmt. The {GT,GE,LT,LE}_EXPR comparisons are
+ lowered by iteration from the most significant limb downwards to
+ the least significant one, for large _BitInt in straight line code,
+ otherwise with the most significant limb handled in straight line
+ code followed by a loop handling one limb at a time. Comparisons
+ with unsigned huge _BitInt whose precision is a multiple of the limb
+ precision can use just the loop and don't need to handle the most
+ significant limb before the loop. The loop or straight line code
+ jumps to the final basic block if a particular pair of limbs
+ is not equal. */
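+
+/* Rough sketch of the emitted control flow for a huge a < b with a partial
+ most significant limb (not literal GIMPLE, p being the index of that
+ limb):
+
+ if (a[p] > b[p]) goto ret_false;
+ if (a[p] < b[p]) goto ret_true;
+ for (idx = p - 1; ; --idx)
+ {
+ if (a[idx] > b[idx]) goto ret_false;
+ if (a[idx] < b[idx]) goto ret_true;
+ if (idx == 0) break;
+ }
+ all limbs equal: the result is false;
+
+ only the most significant limb is compared as signed for signed types and
+ the boolean result is merged through a single PHI in the final block. */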
+
+tree
+bitint_large_huge::lower_comparison_stmt (gimple *stmt, tree_code &cmp_code,
+ tree cmp_op1, tree cmp_op2)
+{
+ tree type = TREE_TYPE (cmp_op1);
+ gcc_assert (TREE_CODE (type) == BITINT_TYPE);
+ bitint_prec_kind kind = bitint_precision_kind (type);
+ gcc_assert (kind >= bitint_prec_large);
+ gimple *g;
+ if (!TYPE_UNSIGNED (type)
+ && integer_zerop (cmp_op2)
+ && (cmp_code == GE_EXPR || cmp_code == LT_EXPR))
+ {
+ unsigned end = CEIL ((unsigned) TYPE_PRECISION (type), limb_prec) - 1;
+ tree idx = size_int (end);
+ m_data_cnt = 0;
+ tree rhs1 = handle_operand (cmp_op1, idx);
+ if (TYPE_UNSIGNED (TREE_TYPE (rhs1)))
+ {
+ tree stype = signed_type_for (TREE_TYPE (rhs1));
+ rhs1 = add_cast (stype, rhs1);
+ }
+ tree lhs = make_ssa_name (boolean_type_node);
+ g = gimple_build_assign (lhs, cmp_code, rhs1,
+ build_zero_cst (TREE_TYPE (rhs1)));
+ insert_before (g);
+ cmp_code = NE_EXPR;
+ return lhs;
+ }
+
+ unsigned cnt, rem = 0, end = 0;
+ tree idx = NULL_TREE, idx_next = NULL_TREE;
+ if (kind == bitint_prec_large)
+ cnt = CEIL ((unsigned) TYPE_PRECISION (type), limb_prec);
+ else
+ {
+ rem = ((unsigned) TYPE_PRECISION (type) % limb_prec);
+ if (rem == 0 && !TYPE_UNSIGNED (type))
+ rem = limb_prec;
+ end = ((unsigned) TYPE_PRECISION (type) - rem) / limb_prec;
+ cnt = 1 + (rem != 0);
+ }
+
+ basic_block edge_bb = NULL;
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ gsi_prev (&gsi);
+ edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
+ edge_bb = e->src;
+ m_gsi = gsi_last_bb (edge_bb);
+ if (!gsi_end_p (m_gsi))
+ gsi_next (&m_gsi);
+
+ edge *edges = XALLOCAVEC (edge, cnt * 2);
+ for (unsigned i = 0; i < cnt; i++)
+ {
+ m_data_cnt = 0;
+ if (kind == bitint_prec_large)
+ idx = size_int (cnt - i - 1);
+ else if (i == cnt - 1)
+ idx = create_loop (size_int (end - 1), &idx_next);
+ else
+ idx = size_int (end);
+ tree rhs1 = handle_operand (cmp_op1, idx);
+ tree rhs2 = handle_operand (cmp_op2, idx);
+ if (i == 0
+ && !TYPE_UNSIGNED (type)
+ && TYPE_UNSIGNED (TREE_TYPE (rhs1)))
+ {
+ tree stype = signed_type_for (TREE_TYPE (rhs1));
+ rhs1 = add_cast (stype, rhs1);
+ rhs2 = add_cast (stype, rhs2);
+ }
+ g = gimple_build_cond (GT_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE);
+ insert_before (g);
+ edge e1 = split_block (gsi_bb (m_gsi), g);
+ e1->flags = EDGE_FALSE_VALUE;
+ edge e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE);
+ e1->probability = profile_probability::likely ();
+ e2->probability = e1->probability.invert ();
+ if (i == 0)
+ set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
+ m_gsi = gsi_after_labels (e1->dest);
+ edges[2 * i] = e2;
+ g = gimple_build_cond (LT_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE);
+ insert_before (g);
+ e1 = split_block (gsi_bb (m_gsi), g);
+ e1->flags = EDGE_FALSE_VALUE;
+ e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE);
+ e1->probability = profile_probability::unlikely ();
+ e2->probability = e1->probability.invert ();
+ m_gsi = gsi_after_labels (e1->dest);
+ edges[2 * i + 1] = e2;
+ m_first = false;
+ if (kind == bitint_prec_huge && i == cnt - 1)
+ {
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1));
+ insert_before (g);
+ g = gimple_build_cond (NE_EXPR, idx, size_zero_node,
+ NULL_TREE, NULL_TREE);
+ insert_before (g);
+ edge true_edge, false_edge;
+ extract_true_false_edges_from_block (gsi_bb (m_gsi),
+ &true_edge, &false_edge);
+ m_gsi = gsi_after_labels (false_edge->dest);
+ }
+ }
+
+ tree lhs = make_ssa_name (boolean_type_node);
+ basic_block bb = gimple_bb (stmt);
+ gphi *phi = create_phi_node (lhs, bb);
+ for (unsigned int i = 0; i < cnt * 2; i++)
+ {
+ tree val = ((cmp_code == GT_EXPR || cmp_code == GE_EXPR)
+ ^ (i & 1)) ? boolean_true_node : boolean_false_node;
+ add_phi_arg (phi, val, edges[i], UNKNOWN_LOCATION);
+ }
+ add_phi_arg (phi, (cmp_code == GE_EXPR || cmp_code == LE_EXPR)
+ ? boolean_true_node : boolean_false_node,
+ find_edge (gsi_bb (m_gsi), bb), UNKNOWN_LOCATION);
+ cmp_code = NE_EXPR;
+ return lhs;
+}
+
+/* Lower large/huge _BitInt left and right shifts except for left
+ shifts by a constant smaller than limb_prec. */
+
+void
+bitint_large_huge::lower_shift_stmt (tree obj, gimple *stmt)
+{
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree_code rhs_code = gimple_assign_rhs_code (stmt);
+ tree type = TREE_TYPE (rhs1);
+ gimple *final_stmt = gsi_stmt (m_gsi);
+ gcc_assert (TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) >= bitint_prec_large);
+ int prec = TYPE_PRECISION (type);
+ tree n = gimple_assign_rhs2 (stmt), n1, n2, n3, n4;
+ gimple *g;
+ if (obj == NULL_TREE)
+ {
+ int part = var_to_partition (m_map, lhs);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ obj = m_vars[part];
+ }
+ /* Preparation code common for both left and right shifts.
+ unsigned n1 = n % limb_prec;
+ size_t n2 = n / limb_prec;
+ size_t n3 = n1 != 0;
+ unsigned n4 = (limb_prec - n1) % limb_prec;
+ (for power of 2 limb_prec, n4 can be computed as -n1 & (limb_prec - 1)). */
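+ /* E.g. with a 64-bit limb and n == 70: n1 == 6, n2 == 1, n3 == 1 and
+ n4 == 58; with n == 128: n1 == 0, n2 == 2, n3 == 0 and n4 == 0. */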
+ if (TREE_CODE (n) == INTEGER_CST)
+ {
+ tree lp = build_int_cst (TREE_TYPE (n), limb_prec);
+ n1 = int_const_binop (TRUNC_MOD_EXPR, n, lp);
+ n2 = fold_convert (sizetype, int_const_binop (TRUNC_DIV_EXPR, n, lp));
+ n3 = size_int (!integer_zerop (n1));
+ n4 = int_const_binop (TRUNC_MOD_EXPR,
+ int_const_binop (MINUS_EXPR, lp, n1), lp);
+ }
+ else
+ {
+ n1 = make_ssa_name (TREE_TYPE (n));
+ n2 = make_ssa_name (sizetype);
+ n3 = make_ssa_name (sizetype);
+ n4 = make_ssa_name (TREE_TYPE (n));
+ if (pow2p_hwi (limb_prec))
+ {
+ tree lpm1 = build_int_cst (TREE_TYPE (n), limb_prec - 1);
+ g = gimple_build_assign (n1, BIT_AND_EXPR, n, lpm1);
+ insert_before (g);
+ g = gimple_build_assign (useless_type_conversion_p (sizetype,
+ TREE_TYPE (n))
+ ? n2 : make_ssa_name (TREE_TYPE (n)),
+ RSHIFT_EXPR, n,
+ build_int_cst (TREE_TYPE (n),
+ exact_log2 (limb_prec)));
+ insert_before (g);
+ if (gimple_assign_lhs (g) != n2)
+ {
+ g = gimple_build_assign (n2, NOP_EXPR, gimple_assign_lhs (g));
+ insert_before (g);
+ }
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (n)),
+ NEGATE_EXPR, n1);
+ insert_before (g);
+ g = gimple_build_assign (n4, BIT_AND_EXPR, gimple_assign_lhs (g),
+ lpm1);
+ insert_before (g);
+ }
+ else
+ {
+ tree lp = build_int_cst (TREE_TYPE (n), limb_prec);
+ g = gimple_build_assign (n1, TRUNC_MOD_EXPR, n, lp);
+ insert_before (g);
+ g = gimple_build_assign (useless_type_conversion_p (sizetype,
+ TREE_TYPE (n))
+ ? n2 : make_ssa_name (TREE_TYPE (n)),
+ TRUNC_DIV_EXPR, n, lp);
+ insert_before (g);
+ if (gimple_assign_lhs (g) != n2)
+ {
+ g = gimple_build_assign (n2, NOP_EXPR, gimple_assign_lhs (g));
+ insert_before (g);
+ }
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (n)),
+ MINUS_EXPR, lp, n1);
+ insert_before (g);
+ g = gimple_build_assign (n4, TRUNC_MOD_EXPR, gimple_assign_lhs (g),
+ lp);
+ insert_before (g);
+ }
+ g = gimple_build_assign (make_ssa_name (boolean_type_node), NE_EXPR, n1,
+ build_zero_cst (TREE_TYPE (n)));
+ insert_before (g);
+ g = gimple_build_assign (n3, NOP_EXPR, gimple_assign_lhs (g));
+ insert_before (g);
+ }
+ tree p = build_int_cst (sizetype,
+ prec / limb_prec - (prec % limb_prec == 0));
+ if (rhs_code == RSHIFT_EXPR)
+ {
+ /* Lower
+ dst = src >> n;
+ as
+ unsigned n1 = n % limb_prec;
+ size_t n2 = n / limb_prec;
+ size_t n3 = n1 != 0;
+ unsigned n4 = (limb_prec - n1) % limb_prec;
+ size_t idx;
+ size_t p = prec / limb_prec - (prec % limb_prec == 0);
+ int signed_p = (typeof (src) -1) < 0;
+ for (idx = n2; idx < ((!signed_p && (prec % limb_prec == 0))
+ ? p : p - n3); ++idx)
+ dst[idx - n2] = (src[idx] >> n1) | (src[idx + n3] << n4);
+ limb_type ext;
+ if (prec % limb_prec == 0)
+ ext = src[p];
+ else if (signed_p)
+ ext = ((signed limb_type) (src[p] << (limb_prec
+ - (prec % limb_prec))))
+ >> (limb_prec - (prec % limb_prec));
+ else
+ ext = src[p] & (((limb_type) 1 << (prec % limb_prec)) - 1);
+ if (!signed_p && (prec % limb_prec == 0))
+ ;
+ else if (idx < prec / limb_prec)
+ {
+ dst[idx - n2] = (src[idx] >> n1) | (ext << n4);
+ ++idx;
+ }
+ idx -= n2;
+ if (signed_p)
+ {
+ dst[idx] = ((signed limb_type) ext) >> n1;
+ ext = ((signed limb_type) ext) >> (limb_prec - 1);
+ }
+ else
+ {
+ dst[idx] = ext >> n1;
+ ext = 0;
+ }
+ for (++idx; idx <= p; ++idx)
+ dst[idx] = ext; */
+ tree pmn3;
+ if (TYPE_UNSIGNED (type) && prec % limb_prec == 0)
+ pmn3 = p;
+ else if (TREE_CODE (n3) == INTEGER_CST)
+ pmn3 = int_const_binop (MINUS_EXPR, p, n3);
+ else
+ {
+ pmn3 = make_ssa_name (sizetype);
+ g = gimple_build_assign (pmn3, MINUS_EXPR, p, n3);
+ insert_before (g);
+ }
+ g = gimple_build_cond (LT_EXPR, n2, pmn3, NULL_TREE, NULL_TREE);
+ edge edge_true, edge_false;
+ if_then (g, profile_probability::likely (), edge_true, edge_false);
+ tree idx_next;
+ tree idx = create_loop (n2, &idx_next);
+ tree idxmn2 = make_ssa_name (sizetype);
+ tree idxpn3 = make_ssa_name (sizetype);
+ g = gimple_build_assign (idxmn2, MINUS_EXPR, idx, n2);
+ insert_before (g);
+ g = gimple_build_assign (idxpn3, PLUS_EXPR, idx, n3);
+ insert_before (g);
+ m_data_cnt = 0;
+ tree t1 = handle_operand (rhs1, idx);
+ m_first = false;
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ RSHIFT_EXPR, t1, n1);
+ insert_before (g);
+ t1 = gimple_assign_lhs (g);
+ if (!integer_zerop (n3))
+ {
+ m_data_cnt = 0;
+ tree t2 = handle_operand (rhs1, idxpn3);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ LSHIFT_EXPR, t2, n4);
+ insert_before (g);
+ t2 = gimple_assign_lhs (g);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ BIT_IOR_EXPR, t1, t2);
+ insert_before (g);
+ t1 = gimple_assign_lhs (g);
+ }
+ tree l = limb_access (TREE_TYPE (lhs), obj, idxmn2, true);
+ g = gimple_build_assign (l, t1);
+ insert_before (g);
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_one_node);
+ insert_before (g);
+ g = gimple_build_cond (LT_EXPR, idx_next, pmn3, NULL_TREE, NULL_TREE);
+ insert_before (g);
+ idx = make_ssa_name (sizetype);
+ m_gsi = gsi_for_stmt (final_stmt);
+ gphi *phi = create_phi_node (idx, gsi_bb (m_gsi));
+ edge_false = find_edge (edge_false->src, gsi_bb (m_gsi));
+ edge_true = EDGE_PRED (gsi_bb (m_gsi),
+ EDGE_PRED (gsi_bb (m_gsi), 0) == edge_false);
+ add_phi_arg (phi, n2, edge_false, UNKNOWN_LOCATION);
+ add_phi_arg (phi, idx_next, edge_true, UNKNOWN_LOCATION);
+ m_data_cnt = 0;
+ tree ms = handle_operand (rhs1, p);
+ tree ext = ms;
+ if (!types_compatible_p (TREE_TYPE (ms), m_limb_type))
+ ext = add_cast (m_limb_type, ms);
+ if (!(TYPE_UNSIGNED (type) && prec % limb_prec == 0)
+ && !integer_zerop (n3))
+ {
+ g = gimple_build_cond (LT_EXPR, idx, p, NULL_TREE, NULL_TREE);
+ if_then (g, profile_probability::likely (), edge_true, edge_false);
+ m_data_cnt = 0;
+ t1 = handle_operand (rhs1, idx);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ RSHIFT_EXPR, t1, n1);
+ insert_before (g);
+ t1 = gimple_assign_lhs (g);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ LSHIFT_EXPR, ext, n4);
+ insert_before (g);
+ tree t2 = gimple_assign_lhs (g);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ BIT_IOR_EXPR, t1, t2);
+ insert_before (g);
+ t1 = gimple_assign_lhs (g);
+ idxmn2 = make_ssa_name (sizetype);
+ g = gimple_build_assign (idxmn2, MINUS_EXPR, idx, n2);
+ insert_before (g);
+ l = limb_access (TREE_TYPE (lhs), obj, idxmn2, true);
+ g = gimple_build_assign (l, t1);
+ insert_before (g);
+ idx_next = make_ssa_name (sizetype);
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_one_node);
+ insert_before (g);
+ m_gsi = gsi_for_stmt (final_stmt);
+ tree nidx = make_ssa_name (sizetype);
+ phi = create_phi_node (nidx, gsi_bb (m_gsi));
+ edge_false = find_edge (edge_false->src, gsi_bb (m_gsi));
+ edge_true = EDGE_PRED (gsi_bb (m_gsi),
+ EDGE_PRED (gsi_bb (m_gsi), 0) == edge_false);
+ add_phi_arg (phi, idx, edge_false, UNKNOWN_LOCATION);
+ add_phi_arg (phi, idx_next, edge_true, UNKNOWN_LOCATION);
+ idx = nidx;
+ }
+ g = gimple_build_assign (make_ssa_name (sizetype), MINUS_EXPR, idx, n2);
+ insert_before (g);
+ idx = gimple_assign_lhs (g);
+ tree sext = ext;
+ if (!TYPE_UNSIGNED (type))
+ sext = add_cast (signed_type_for (m_limb_type), ext);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (sext)),
+ RSHIFT_EXPR, sext, n1);
+ insert_before (g);
+ t1 = gimple_assign_lhs (g);
+ if (!TYPE_UNSIGNED (type))
+ {
+ t1 = add_cast (m_limb_type, t1);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (sext)),
+ RSHIFT_EXPR, sext,
+ build_int_cst (TREE_TYPE (n),
+ limb_prec - 1));
+ insert_before (g);
+ ext = add_cast (m_limb_type, gimple_assign_lhs (g));
+ }
+ else
+ ext = build_zero_cst (m_limb_type);
+ l = limb_access (TREE_TYPE (lhs), obj, idx, true);
+ g = gimple_build_assign (l, t1);
+ insert_before (g);
+ g = gimple_build_assign (make_ssa_name (sizetype), PLUS_EXPR, idx,
+ size_one_node);
+ insert_before (g);
+ idx = gimple_assign_lhs (g);
+ g = gimple_build_cond (LE_EXPR, idx, p, NULL_TREE, NULL_TREE);
+ if_then (g, profile_probability::likely (), edge_true, edge_false);
+ idx = create_loop (idx, &idx_next);
+ l = limb_access (TREE_TYPE (lhs), obj, idx, true);
+ g = gimple_build_assign (l, ext);
+ insert_before (g);
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_one_node);
+ insert_before (g);
+ g = gimple_build_cond (LE_EXPR, idx_next, p, NULL_TREE, NULL_TREE);
+ insert_before (g);
+ }
+ else
+ {
+ /* Lower
+ dst = src << n;
+ as
+ unsigned n1 = n % limb_prec;
+ size_t n2 = n / limb_prec;
+ size_t n3 = n1 != 0;
+ unsigned n4 = (limb_prec - n1) % limb_prec;
+ size_t idx;
+ size_t p = prec / limb_prec - (prec % limb_prec == 0);
+ for (idx = p; (ssize_t) idx >= (ssize_t) (n2 + n3); --idx)
+ dst[idx] = (src[idx - n2] << n1) | (src[idx - n2 - n3] >> n4);
+ if (n1)
+ {
+ dst[idx] = src[idx - n2] << n1;
+ --idx;
+ }
+ for (; (ssize_t) idx >= 0; --idx)
+ dst[idx] = 0; */
+ tree n2pn3;
+ if (TREE_CODE (n2) == INTEGER_CST && TREE_CODE (n3) == INTEGER_CST)
+ n2pn3 = int_const_binop (PLUS_EXPR, n2, n3);
+ else
+ {
+ n2pn3 = make_ssa_name (sizetype);
+ g = gimple_build_assign (n2pn3, PLUS_EXPR, n2, n3);
+ insert_before (g);
+ }
+ /* For LSHIFT_EXPR, we can use handle_operand with non-INTEGER_CST
+ idx even to access the most significant partial limb. */
+ m_var_msb = true;
+ if (integer_zerop (n3))
+ /* For n3 == 0 p >= n2 + n3 is always true for all valid shift
+ counts. Emit if (true) condition that can be optimized later. */
+ g = gimple_build_cond (NE_EXPR, boolean_true_node, boolean_false_node,
+ NULL_TREE, NULL_TREE);
+ else
+ g = gimple_build_cond (LE_EXPR, n2pn3, p, NULL_TREE, NULL_TREE);
+ edge edge_true, edge_false;
+ if_then (g, profile_probability::likely (), edge_true, edge_false);
+ tree idx_next;
+ tree idx = create_loop (p, &idx_next);
+ tree idxmn2 = make_ssa_name (sizetype);
+ tree idxmn2mn3 = make_ssa_name (sizetype);
+ g = gimple_build_assign (idxmn2, MINUS_EXPR, idx, n2);
+ insert_before (g);
+ g = gimple_build_assign (idxmn2mn3, MINUS_EXPR, idxmn2, n3);
+ insert_before (g);
+ m_data_cnt = 0;
+ tree t1 = handle_operand (rhs1, idxmn2);
+ m_first = false;
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ LSHIFT_EXPR, t1, n1);
+ insert_before (g);
+ t1 = gimple_assign_lhs (g);
+ if (!integer_zerop (n3))
+ {
+ m_data_cnt = 0;
+ tree t2 = handle_operand (rhs1, idxmn2mn3);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ RSHIFT_EXPR, t2, n4);
+ insert_before (g);
+ t2 = gimple_assign_lhs (g);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ BIT_IOR_EXPR, t1, t2);
+ insert_before (g);
+ t1 = gimple_assign_lhs (g);
+ }
+ tree l = limb_access (TREE_TYPE (lhs), obj, idx, true);
+ g = gimple_build_assign (l, t1);
+ insert_before (g);
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1));
+ insert_before (g);
+ tree sn2pn3 = add_cast (ssizetype, n2pn3);
+ g = gimple_build_cond (GE_EXPR, add_cast (ssizetype, idx_next), sn2pn3,
+ NULL_TREE, NULL_TREE);
+ insert_before (g);
+ idx = make_ssa_name (sizetype);
+ m_gsi = gsi_for_stmt (final_stmt);
+ gphi *phi = create_phi_node (idx, gsi_bb (m_gsi));
+ edge_false = find_edge (edge_false->src, gsi_bb (m_gsi));
+ edge_true = EDGE_PRED (gsi_bb (m_gsi),
+ EDGE_PRED (gsi_bb (m_gsi), 0) == edge_false);
+ add_phi_arg (phi, p, edge_false, UNKNOWN_LOCATION);
+ add_phi_arg (phi, idx_next, edge_true, UNKNOWN_LOCATION);
+ m_data_cnt = 0;
+ if (!integer_zerop (n3))
+ {
+ g = gimple_build_cond (NE_EXPR, n3, size_zero_node,
+ NULL_TREE, NULL_TREE);
+ if_then (g, profile_probability::likely (), edge_true, edge_false);
+ idxmn2 = make_ssa_name (sizetype);
+ g = gimple_build_assign (idxmn2, MINUS_EXPR, idx, n2);
+ insert_before (g);
+ m_data_cnt = 0;
+ t1 = handle_operand (rhs1, idxmn2);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ LSHIFT_EXPR, t1, n1);
+ insert_before (g);
+ t1 = gimple_assign_lhs (g);
+ l = limb_access (TREE_TYPE (lhs), obj, idx, true);
+ g = gimple_build_assign (l, t1);
+ insert_before (g);
+ idx_next = make_ssa_name (sizetype);
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1));
+ insert_before (g);
+ m_gsi = gsi_for_stmt (final_stmt);
+ tree nidx = make_ssa_name (sizetype);
+ phi = create_phi_node (nidx, gsi_bb (m_gsi));
+ edge_false = find_edge (edge_false->src, gsi_bb (m_gsi));
+ edge_true = EDGE_PRED (gsi_bb (m_gsi),
+ EDGE_PRED (gsi_bb (m_gsi), 0) == edge_false);
+ add_phi_arg (phi, idx, edge_false, UNKNOWN_LOCATION);
+ add_phi_arg (phi, idx_next, edge_true, UNKNOWN_LOCATION);
+ idx = nidx;
+ }
+ g = gimple_build_cond (GE_EXPR, add_cast (ssizetype, idx),
+ ssize_int (0), NULL_TREE, NULL_TREE);
+ if_then (g, profile_probability::likely (), edge_true, edge_false);
+ idx = create_loop (idx, &idx_next);
+ l = limb_access (TREE_TYPE (lhs), obj, idx, true);
+ g = gimple_build_assign (l, build_zero_cst (m_limb_type));
+ insert_before (g);
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1));
+ insert_before (g);
+ g = gimple_build_cond (GE_EXPR, add_cast (ssizetype, idx_next),
+ ssize_int (0), NULL_TREE, NULL_TREE);
+ insert_before (g);
+ }
+}
+
+/* Lower large/huge _BitInt multiplication, division or remainder. */
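+
+/* All three are lowered to internal function calls taking the operands by
+ address; e.g. a multiplication is emitted roughly as
+ .MULBITINT (&dst, prec, &op1, prec1, &op2, prec2), and division and
+ remainder share .DIVMODBITINT, passing a null pointer for whichever of
+ the quotient or remainder is not needed (a sketch, not literal GIMPLE). */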
+
+void
+bitint_large_huge::lower_muldiv_stmt (tree obj, gimple *stmt)
+{
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree rhs2 = gimple_assign_rhs2 (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree_code rhs_code = gimple_assign_rhs_code (stmt);
+ tree type = TREE_TYPE (rhs1);
+ gcc_assert (TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) >= bitint_prec_large);
+ int prec = TYPE_PRECISION (type), prec1, prec2;
+ rhs1 = handle_operand_addr (rhs1, stmt, NULL, &prec1);
+ rhs2 = handle_operand_addr (rhs2, stmt, NULL, &prec2);
+ if (obj == NULL_TREE)
+ {
+ int part = var_to_partition (m_map, lhs);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ obj = m_vars[part];
+ lhs = build_fold_addr_expr (obj);
+ }
+ else
+ {
+ lhs = build_fold_addr_expr (obj);
+ lhs = force_gimple_operand_gsi (&m_gsi, lhs, true,
+ NULL_TREE, true, GSI_SAME_STMT);
+ }
+ tree sitype = lang_hooks.types.type_for_mode (SImode, 0);
+ gimple *g;
+ switch (rhs_code)
+ {
+ case MULT_EXPR:
+ g = gimple_build_call_internal (IFN_MULBITINT, 6,
+ lhs, build_int_cst (sitype, prec),
+ rhs1, build_int_cst (sitype, prec1),
+ rhs2, build_int_cst (sitype, prec2));
+ insert_before (g);
+ break;
+ case TRUNC_DIV_EXPR:
+ g = gimple_build_call_internal (IFN_DIVMODBITINT, 8,
+ lhs, build_int_cst (sitype, prec),
+ null_pointer_node,
+ build_int_cst (sitype, 0),
+ rhs1, build_int_cst (sitype, prec1),
+ rhs2, build_int_cst (sitype, prec2));
+ if (!stmt_ends_bb_p (stmt))
+ gimple_call_set_nothrow (as_a <gcall *> (g), true);
+ insert_before (g);
+ break;
+ case TRUNC_MOD_EXPR:
+ g = gimple_build_call_internal (IFN_DIVMODBITINT, 8, null_pointer_node,
+ build_int_cst (sitype, 0),
+ lhs, build_int_cst (sitype, prec),
+ rhs1, build_int_cst (sitype, prec1),
+ rhs2, build_int_cst (sitype, prec2));
+ if (!stmt_ends_bb_p (stmt))
+ gimple_call_set_nothrow (as_a <gcall *> (g), true);
+ insert_before (g);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (stmt_ends_bb_p (stmt))
+ {
+ maybe_duplicate_eh_stmt (g, stmt);
+ edge e1;
+ edge_iterator ei;
+ basic_block bb = gimple_bb (stmt);
+
+ FOR_EACH_EDGE (e1, ei, bb->succs)
+ if (e1->flags & EDGE_EH)
+ break;
+ if (e1)
+ {
+ edge e2 = split_block (gsi_bb (m_gsi), g);
+ m_gsi = gsi_after_labels (e2->dest);
+ make_edge (e2->src, e1->dest, EDGE_EH)->probability
+ = profile_probability::very_unlikely ();
+ }
+ }
+}
+
+/* Lower large/huge _BitInt conversion to/from floating point. */
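+
+/* These are also lowered to internal function calls: roughly
+ .FLOATTOBITINT (&dst, prec, src) for FIX_TRUNC_EXPR (with negative prec
+ for signed results) and lhs = .BITINTTOFLOAT (&src, prec) for the
+ conversion from _BitInt to floating point (a sketch, not literal
+ GIMPLE). */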
+
+void
+bitint_large_huge::lower_float_conv_stmt (tree obj, gimple *stmt)
+{
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree_code rhs_code = gimple_assign_rhs_code (stmt);
+ tree sitype = lang_hooks.types.type_for_mode (SImode, 0);
+ gimple *g;
+ if (rhs_code == FIX_TRUNC_EXPR)
+ {
+ int prec = TYPE_PRECISION (TREE_TYPE (lhs));
+ if (!TYPE_UNSIGNED (TREE_TYPE (lhs)))
+ prec = -prec;
+ if (obj == NULL_TREE)
+ {
+ int part = var_to_partition (m_map, lhs);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ obj = m_vars[part];
+ lhs = build_fold_addr_expr (obj);
+ }
+ else
+ {
+ lhs = build_fold_addr_expr (obj);
+ lhs = force_gimple_operand_gsi (&m_gsi, lhs, true,
+ NULL_TREE, true, GSI_SAME_STMT);
+ }
+ scalar_mode from_mode
+ = as_a <scalar_mode> (TYPE_MODE (TREE_TYPE (rhs1)));
+#ifdef HAVE_SFmode
+ /* IEEE single is a full superset of both IEEE half and
+ bfloat formats, so convert to float first and then to _BitInt
+ to avoid the need for another 2 library routines. */
+ if ((REAL_MODE_FORMAT (from_mode) == &arm_bfloat_half_format
+ || REAL_MODE_FORMAT (from_mode) == &ieee_half_format)
+ && REAL_MODE_FORMAT (SFmode) == &ieee_single_format)
+ {
+ tree type = lang_hooks.types.type_for_mode (SFmode, 0);
+ if (type)
+ rhs1 = add_cast (type, rhs1);
+ }
+#endif
+ g = gimple_build_call_internal (IFN_FLOATTOBITINT, 3,
+ lhs, build_int_cst (sitype, prec),
+ rhs1);
+ insert_before (g);
+ }
+ else
+ {
+ int prec;
+ rhs1 = handle_operand_addr (rhs1, stmt, NULL, &prec);
+ g = gimple_build_call_internal (IFN_BITINTTOFLOAT, 2,
+ rhs1, build_int_cst (sitype, prec));
+ gimple_call_set_lhs (g, lhs);
+ if (!stmt_ends_bb_p (stmt))
+ gimple_call_set_nothrow (as_a <gcall *> (g), true);
+ gsi_replace (&m_gsi, g, true);
+ }
+}
+
+/* Helper method for lower_addsub_overflow and lower_mul_overflow.
+ If CHECK_ZERO is true, the caller wants to check whether all bits
+ in [START, END) are zero, otherwise whether the bits in [START, END)
+ are either all zero or all ones. L is the limb with index LIMB,
+ START and END are measured in bits. */
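+
+/* For example (assuming a 64-bit limb), with START == 60 and END == 130:
+ for LIMB 0 the low 60 bits are shifted out, LIMB 1 is returned unchanged,
+ and for LIMB 2 only the low 2 bits are kept (masked off if CHECK_ZERO,
+ otherwise shifted up and arithmetically back down so that they fill the
+ whole limb). */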
+
+tree
+bitint_large_huge::arith_overflow_extract_bits (unsigned int start,
+ unsigned int end, tree l,
+ unsigned int limb,
+ bool check_zero)
+{
+ unsigned startlimb = start / limb_prec;
+ unsigned endlimb = (end - 1) / limb_prec;
+ gimple *g;
+
+ if ((start % limb_prec) == 0 && (end % limb_prec) == 0)
+ return l;
+ if (startlimb == endlimb && limb == startlimb)
+ {
+ if (check_zero)
+ {
+ wide_int w = wi::shifted_mask (start % limb_prec,
+ end - start, false, limb_prec);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ BIT_AND_EXPR, l,
+ wide_int_to_tree (m_limb_type, w));
+ insert_before (g);
+ return gimple_assign_lhs (g);
+ }
+ unsigned int shift = start % limb_prec;
+ if ((end % limb_prec) != 0)
+ {
+ unsigned int lshift = (-end) % limb_prec;
+ shift += lshift;
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ LSHIFT_EXPR, l,
+ build_int_cst (unsigned_type_node,
+ lshift));
+ insert_before (g);
+ l = gimple_assign_lhs (g);
+ }
+ l = add_cast (signed_type_for (m_limb_type), l);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (l)),
+ RSHIFT_EXPR, l,
+ build_int_cst (unsigned_type_node, shift));
+ insert_before (g);
+ return add_cast (m_limb_type, gimple_assign_lhs (g));
+ }
+ else if (limb == startlimb)
+ {
+ if ((start % limb_prec) == 0)
+ return l;
+ if (!check_zero)
+ l = add_cast (signed_type_for (m_limb_type), l);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (l)),
+ RSHIFT_EXPR, l,
+ build_int_cst (unsigned_type_node,
+ start % limb_prec));
+ insert_before (g);
+ l = gimple_assign_lhs (g);
+ if (!check_zero)
+ l = add_cast (m_limb_type, l);
+ return l;
+ }
+ else if (limb == endlimb)
+ {
+ if ((end % limb_prec) == 0)
+ return l;
+ if (check_zero)
+ {
+ wide_int w = wi::mask (end % limb_prec, false, limb_prec);
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ BIT_AND_EXPR, l,
+ wide_int_to_tree (m_limb_type, w));
+ insert_before (g);
+ return gimple_assign_lhs (g);
+ }
+ unsigned int shift = (-end) % limb_prec;
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ LSHIFT_EXPR, l,
+ build_int_cst (unsigned_type_node, shift));
+ insert_before (g);
+ l = add_cast (signed_type_for (m_limb_type), gimple_assign_lhs (g));
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (l)),
+ RSHIFT_EXPR, l,
+ build_int_cst (unsigned_type_node, shift));
+ insert_before (g);
+ return add_cast (m_limb_type, gimple_assign_lhs (g));
+ }
+ return l;
+}
+
+/* Helper method for lower_addsub_overflow and lower_mul_overflow. Store
+ result including overflow flag into the right locations. */
+
+void
+bitint_large_huge::finish_arith_overflow (tree var, tree obj, tree type,
+ tree ovf, tree lhs, tree orig_obj,
+ gimple *stmt, tree_code code)
+{
+ gimple *g;
+
+ if (obj == NULL_TREE
+ && (TREE_CODE (type) != BITINT_TYPE
+ || bitint_precision_kind (type) < bitint_prec_large))
+ {
+ /* Add support for 3 or more limbs filled in from normal integral
+ type if this assert fails. If no target chooses limb mode smaller
+ than half of largest supported normal integral type, this will not
+ be needed. */
+ gcc_assert (TYPE_PRECISION (type) <= 2 * limb_prec);
+ tree lhs_type = type;
+ if (TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) == bitint_prec_middle)
+ lhs_type = build_nonstandard_integer_type (TYPE_PRECISION (type),
+ TYPE_UNSIGNED (type));
+ tree r1 = limb_access (NULL_TREE, var, size_int (0), true);
+ g = gimple_build_assign (make_ssa_name (m_limb_type), r1);
+ insert_before (g);
+ r1 = gimple_assign_lhs (g);
+ if (!useless_type_conversion_p (lhs_type, TREE_TYPE (r1)))
+ r1 = add_cast (lhs_type, r1);
+ if (TYPE_PRECISION (lhs_type) > limb_prec)
+ {
+ tree r2 = limb_access (NULL_TREE, var, size_int (1), true);
+ g = gimple_build_assign (make_ssa_name (m_limb_type), r2);
+ insert_before (g);
+ r2 = gimple_assign_lhs (g);
+ r2 = add_cast (lhs_type, r2);
+ g = gimple_build_assign (make_ssa_name (lhs_type), LSHIFT_EXPR, r2,
+ build_int_cst (unsigned_type_node,
+ limb_prec));
+ insert_before (g);
+ g = gimple_build_assign (make_ssa_name (lhs_type), BIT_IOR_EXPR, r1,
+ gimple_assign_lhs (g));
+ insert_before (g);
+ r1 = gimple_assign_lhs (g);
+ }
+ if (lhs_type != type)
+ r1 = add_cast (type, r1);
+ ovf = add_cast (lhs_type, ovf);
+ if (lhs_type != type)
+ ovf = add_cast (type, ovf);
+ g = gimple_build_assign (lhs, COMPLEX_EXPR, r1, ovf);
+ m_gsi = gsi_for_stmt (stmt);
+ gsi_replace (&m_gsi, g, true);
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT nelts = 0;
+ tree atype = NULL_TREE;
+ if (obj)
+ {
+ nelts = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (obj))) / limb_prec;
+ if (orig_obj == NULL_TREE)
+ nelts >>= 1;
+ atype = build_array_type_nelts (m_limb_type, nelts);
+ }
+ if (var && obj)
+ {
+ tree v1, v2;
+ tree zero;
+ if (orig_obj == NULL_TREE)
+ {
+ zero = build_zero_cst (build_pointer_type (TREE_TYPE (obj)));
+ v1 = build2 (MEM_REF, atype,
+ build_fold_addr_expr (unshare_expr (obj)), zero);
+ }
+ else if (!useless_type_conversion_p (atype, TREE_TYPE (obj)))
+ v1 = build1 (VIEW_CONVERT_EXPR, atype, unshare_expr (obj));
+ else
+ v1 = unshare_expr (obj);
+ zero = build_zero_cst (build_pointer_type (TREE_TYPE (var)));
+ v2 = build2 (MEM_REF, atype, build_fold_addr_expr (var), zero);
+ g = gimple_build_assign (v1, v2);
+ insert_before (g);
+ }
+ if (orig_obj == NULL_TREE && obj)
+ {
+ ovf = add_cast (m_limb_type, ovf);
+ tree l = limb_access (NULL_TREE, obj, size_int (nelts), true);
+ g = gimple_build_assign (l, ovf);
+ insert_before (g);
+ if (nelts > 1)
+ {
+ atype = build_array_type_nelts (m_limb_type, nelts - 1);
+ tree off = build_int_cst (build_pointer_type (TREE_TYPE (obj)),
+ (nelts + 1) * m_limb_size);
+ tree v1 = build2 (MEM_REF, atype,
+ build_fold_addr_expr (unshare_expr (obj)),
+ off);
+ g = gimple_build_assign (v1, build_zero_cst (atype));
+ insert_before (g);
+ }
+ }
+ else if (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE)
+ {
+ imm_use_iterator ui;
+ use_operand_p use_p;
+ FOR_EACH_IMM_USE_FAST (use_p, ui, lhs)
+ {
+ g = USE_STMT (use_p);
+ if (!is_gimple_assign (g)
+ || gimple_assign_rhs_code (g) != IMAGPART_EXPR)
+ continue;
+ tree lhs2 = gimple_assign_lhs (g);
+ gimple *use_stmt;
+ single_imm_use (lhs2, &use_p, &use_stmt);
+ lhs2 = gimple_assign_lhs (use_stmt);
+ gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
+ if (useless_type_conversion_p (TREE_TYPE (lhs2), TREE_TYPE (ovf)))
+ g = gimple_build_assign (lhs2, ovf);
+ else
+ g = gimple_build_assign (lhs2, NOP_EXPR, ovf);
+ gsi_replace (&gsi, g, true);
+ break;
+ }
+ }
+ else if (ovf != boolean_false_node)
+ {
+ g = gimple_build_cond (NE_EXPR, ovf, boolean_false_node,
+ NULL_TREE, NULL_TREE);
+ edge edge_true, edge_false;
+ if_then (g, profile_probability::very_unlikely (),
+ edge_true, edge_false);
+ tree zero = build_zero_cst (TREE_TYPE (lhs));
+ tree fn = ubsan_build_overflow_builtin (code, m_loc,
+ TREE_TYPE (lhs),
+ zero, zero, NULL);
+ force_gimple_operand_gsi (&m_gsi, fn, true, NULL_TREE,
+ true, GSI_SAME_STMT);
+ m_gsi = gsi_after_labels (edge_true->dest);
+ }
+ }
+ if (var)
+ {
+ tree clobber = build_clobber (TREE_TYPE (var), CLOBBER_EOL);
+ g = gimple_build_assign (var, clobber);
+ gsi_insert_after (&m_gsi, g, GSI_SAME_STMT);
+ }
+}
+
+/* Helper function for lower_addsub_overflow and lower_mul_overflow.
+ Given the operation CODE, the precision of the result TYPE (PREC),
+ argument 0 precision PREC0, argument 1 precision PREC1 and the minimum
+ precision of the infinite precision result PREC2, compute *START, *END
+ and *CHECK_ZERO describing which bits need to be tested for overflow,
+ and return boolean_false_node if overflow is known not to happen,
+ NULL_TREE otherwise. */
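+
+/* E.g. for an addition with signed _BitInt(255) result and two operands
+ with no usable range information (PREC == 255, PREC0 == PREC1 == -255,
+ PREC2 == 256) this sets *START to 254, *END to 256 and *CHECK_ZERO to
+ false: there is overflow unless bits [254, 256) of the infinite
+ precision result are either all zeros or all ones. */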
+
+static tree
+arith_overflow (tree_code code, tree type, int prec, int prec0, int prec1,
+ int prec2, unsigned *start, unsigned *end, bool *check_zero)
+{
+ *start = 0;
+ *end = 0;
+ *check_zero = true;
+ /* Ignore this special rule for subtraction: even if both
+ prec0 >= 0 and prec1 >= 0, their difference can be negative
+ in infinite precision. */
+ if (code != MINUS_EXPR && prec0 >= 0 && prec1 >= 0)
+ {
+ /* Result in [0, prec2) is unsigned, if prec > prec2,
+ all bits above it will be zero. */
+ if ((prec - !TYPE_UNSIGNED (type)) >= prec2)
+ return boolean_false_node;
+ else
+ {
+ /* ovf if any of bits in [start, end) is non-zero. */
+ *start = prec - !TYPE_UNSIGNED (type);
+ *end = prec2;
+ }
+ }
+ else if (TYPE_UNSIGNED (type))
+ {
+ /* If result in [0, prec2) is signed and if prec > prec2,
+ all bits above it will be sign bit copies. */
+ if (prec >= prec2)
+ {
+ /* ovf if bit prec - 1 is non-zero. */
+ *start = prec - 1;
+ *end = prec;
+ }
+ else
+ {
+ /* ovf if any of bits in [start, end) is non-zero. */
+ *start = prec;
+ *end = prec2;
+ }
+ }
+ else if (prec >= prec2)
+ return boolean_false_node;
+ else
+ {
+ /* ovf if [start, end) bits aren't all zeros or all ones. */
+ *start = prec - 1;
+ *end = prec2;
+ *check_zero = false;
+ }
+ return NULL_TREE;
+}
+
+/* Lower a .{ADD,SUB}_OVERFLOW call with at least one large/huge _BitInt
+ argument, or with a _Complex large/huge _BitInt return type. */
+
+void
+bitint_large_huge::lower_addsub_overflow (tree obj, gimple *stmt)
+{
+ tree arg0 = gimple_call_arg (stmt, 0);
+ tree arg1 = gimple_call_arg (stmt, 1);
+ tree lhs = gimple_call_lhs (stmt);
+ gimple *g;
+
+ if (!lhs)
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ gsi_remove (&gsi, true);
+ return;
+ }
+ gimple *final_stmt = gsi_stmt (m_gsi);
+ tree type = TREE_TYPE (lhs);
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ type = TREE_TYPE (type);
+ int prec = TYPE_PRECISION (type);
+ int prec0 = range_to_prec (arg0, stmt);
+ int prec1 = range_to_prec (arg1, stmt);
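+ /* A negative PREC0/PREC1 here means the operand's range can include
+ negative values. PREC2 is then the minimum precision able to hold
+ the infinite precision result: one bit more than the wider operand,
+ after first widening the operand with non-negative range by one bit
+ when the signedness of the two ranges differs. PREC3 is the larger
+ of the operand precisions, but at least PREC. */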
+ int prec2 = ((prec0 < 0) == (prec1 < 0)
+ ? MAX (prec0 < 0 ? -prec0 : prec0,
+ prec1 < 0 ? -prec1 : prec1) + 1
+ : MAX (prec0 < 0 ? -prec0 : prec0 + 1,
+ prec1 < 0 ? -prec1 : prec1 + 1) + 1);
+ int prec3 = MAX (prec0 < 0 ? -prec0 : prec0,
+ prec1 < 0 ? -prec1 : prec1);
+ prec3 = MAX (prec3, prec);
+ tree var = NULL_TREE;
+ tree orig_obj = obj;
+ if (obj == NULL_TREE
+ && TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) >= bitint_prec_large
+ && m_names
+ && bitmap_bit_p (m_names, SSA_NAME_VERSION (lhs)))
+ {
+ int part = var_to_partition (m_map, lhs);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ obj = m_vars[part];
+ if (TREE_TYPE (lhs) == type)
+ orig_obj = obj;
+ }
+ if (TREE_CODE (type) != BITINT_TYPE
+ || bitint_precision_kind (type) < bitint_prec_large)
+ {
+ unsigned HOST_WIDE_INT nelts = CEIL (prec, limb_prec);
+ tree atype = build_array_type_nelts (m_limb_type, nelts);
+ var = create_tmp_var (atype);
+ }
+
+ enum tree_code code;
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_ADD_OVERFLOW:
+ case IFN_UBSAN_CHECK_ADD:
+ code = PLUS_EXPR;
+ break;
+ case IFN_SUB_OVERFLOW:
+ case IFN_UBSAN_CHECK_SUB:
+ code = MINUS_EXPR;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ unsigned start, end;
+ bool check_zero;
+ tree ovf = arith_overflow (code, type, prec, prec0, prec1, prec2,
+ &start, &end, &check_zero);
+
+ unsigned startlimb, endlimb;
+ if (ovf)
+ {
+ startlimb = ~0U;
+ endlimb = ~0U;
+ }
+ else
+ {
+ startlimb = start / limb_prec;
+ endlimb = (end - 1) / limb_prec;
+ }
+
+ int prec4 = ovf != NULL_TREE ? prec : prec3;
+ bitint_prec_kind kind = bitint_precision_kind (prec4);
+ unsigned cnt, rem = 0, fin = 0;
+ tree idx = NULL_TREE, idx_first = NULL_TREE, idx_next = NULL_TREE;
+ bool last_ovf = (ovf == NULL_TREE
+ && CEIL (prec2, limb_prec) > CEIL (prec3, limb_prec));
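+ /* For the huge kind a loop handles two limbs per iteration: FIN is
+ the number of limbs covered by the loop, while the remaining REM
+ bits and, when LAST_OVF, one extra most significant limb used only
+ for the overflow check are emitted as straight line code after
+ the loop. */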
+ if (kind != bitint_prec_huge)
+ cnt = CEIL (prec4, limb_prec) + last_ovf;
+ else
+ {
+ rem = (prec4 % (2 * limb_prec));
+ fin = (prec4 - rem) / limb_prec;
+ cnt = 2 + CEIL (rem, limb_prec) + last_ovf;
+ idx = idx_first = create_loop (size_zero_node, &idx_next);
+ }
+
+ if (kind == bitint_prec_huge)
+ m_upwards_2limb = fin;
+ m_upwards = true;
+
+ tree type0 = TREE_TYPE (arg0);
+ tree type1 = TREE_TYPE (arg1);
+ if (TYPE_PRECISION (type0) < prec3)
+ {
+ type0 = build_bitint_type (prec3, TYPE_UNSIGNED (type0));
+ if (TREE_CODE (arg0) == INTEGER_CST)
+ arg0 = fold_convert (type0, arg0);
+ }
+ if (TYPE_PRECISION (type1) < prec3)
+ {
+ type1 = build_bitint_type (prec3, TYPE_UNSIGNED (type1));
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ arg1 = fold_convert (type1, arg1);
+ }
+ unsigned int data_cnt = 0;
+ tree last_rhs1 = NULL_TREE, last_rhs2 = NULL_TREE;
+ tree cmp = build_zero_cst (m_limb_type);
+ unsigned prec_limbs = CEIL ((unsigned) prec, limb_prec);
+ tree ovf_out = NULL_TREE, cmp_out = NULL_TREE;
+ for (unsigned i = 0; i < cnt; i++)
+ {
+ m_data_cnt = 0;
+ tree rhs1, rhs2;
+ if (kind != bitint_prec_huge)
+ idx = size_int (i);
+ else if (i >= 2)
+ idx = size_int (fin + (i > 2));
+ if (!last_ovf || i < cnt - 1)
+ {
+ if (type0 != TREE_TYPE (arg0))
+ rhs1 = handle_cast (type0, arg0, idx);
+ else
+ rhs1 = handle_operand (arg0, idx);
+ if (type1 != TREE_TYPE (arg1))
+ rhs2 = handle_cast (type1, arg1, idx);
+ else
+ rhs2 = handle_operand (arg1, idx);
+ if (i == 0)
+ data_cnt = m_data_cnt;
+ if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1)))
+ rhs1 = add_cast (m_limb_type, rhs1);
+ if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs2)))
+ rhs2 = add_cast (m_limb_type, rhs2);
+ last_rhs1 = rhs1;
+ last_rhs2 = rhs2;
+ }
+ else
+ {
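+ /* Extra iteration for LAST_OVF: the operand limbs are exhausted,
+ so use zero for an operand of unsigned type and otherwise the
+ sign extension of its most significant processed limb
+ (arithmetic shift right by LIMB_PREC - 1). */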
+ m_data_cnt = data_cnt;
+ if (TYPE_UNSIGNED (type0))
+ rhs1 = build_zero_cst (m_limb_type);
+ else
+ {
+ rhs1 = add_cast (signed_type_for (m_limb_type), last_rhs1);
+ if (TREE_CODE (rhs1) == INTEGER_CST)
+ rhs1 = build_int_cst (m_limb_type,
+ tree_int_cst_sgn (rhs1) < 0 ? -1 : 0);
+ else
+ {
+ tree lpm1 = build_int_cst (unsigned_type_node,
+ limb_prec - 1);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs1)),
+ RSHIFT_EXPR, rhs1, lpm1);
+ insert_before (g);
+ rhs1 = add_cast (m_limb_type, gimple_assign_lhs (g));
+ }
+ }
+ if (TYPE_UNSIGNED (type1))
+ rhs2 = build_zero_cst (m_limb_type);
+ else
+ {
+ rhs2 = add_cast (signed_type_for (m_limb_type), last_rhs2);
+ if (TREE_CODE (rhs2) == INTEGER_CST)
+ rhs2 = build_int_cst (m_limb_type,
+ tree_int_cst_sgn (rhs2) < 0 ? -1 : 0);
+ else
+ {
+ tree lpm1 = build_int_cst (unsigned_type_node,
+ limb_prec - 1);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs2)),
+ RSHIFT_EXPR, rhs2, lpm1);
+ insert_before (g);
+ rhs2 = add_cast (m_limb_type, gimple_assign_lhs (g));
+ }
+ }
+ }
+ tree rhs = handle_plus_minus (code, rhs1, rhs2, idx);
+ if (ovf != boolean_false_node)
+ {
+ if (tree_fits_uhwi_p (idx))
+ {
+ unsigned limb = tree_to_uhwi (idx);
+ if (limb >= startlimb && limb <= endlimb)
+ {
+ tree l = arith_overflow_extract_bits (start, end, rhs,
+ limb, check_zero);
+ tree this_ovf = make_ssa_name (boolean_type_node);
+ if (ovf == NULL_TREE && !check_zero)
+ {
+ cmp = l;
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ PLUS_EXPR, l,
+ build_int_cst (m_limb_type, 1));
+ insert_before (g);
+ g = gimple_build_assign (this_ovf, GT_EXPR,
+ gimple_assign_lhs (g),
+ build_int_cst (m_limb_type, 1));
+ }
+ else
+ g = gimple_build_assign (this_ovf, NE_EXPR, l, cmp);
+ insert_before (g);
+ if (ovf == NULL_TREE)
+ ovf = this_ovf;
+ else
+ {
+ tree b = make_ssa_name (boolean_type_node);
+ g = gimple_build_assign (b, BIT_IOR_EXPR, ovf, this_ovf);
+ insert_before (g);
+ ovf = b;
+ }
+ }
+ }
+ else if (startlimb < fin)
+ {
+ if (m_first && startlimb + 2 < fin)
+ {
+ tree data_out;
+ ovf = prepare_data_in_out (boolean_false_node, idx, &data_out);
+ ovf_out = m_data.pop ();
+ m_data.pop ();
+ if (!check_zero)
+ {
+ cmp = prepare_data_in_out (cmp, idx, &data_out);
+ cmp_out = m_data.pop ();
+ m_data.pop ();
+ }
+ }
+ if (i != 0 || startlimb != fin - 1)
+ {
+ tree_code cmp_code;
+ bool single_comparison
+ = (startlimb + 2 >= fin || (startlimb & 1) != (i & 1));
+ if (!single_comparison)
+ {
+ cmp_code = GE_EXPR;
+ if (!check_zero && (start % limb_prec) == 0)
+ single_comparison = true;
+ }
+ else if ((startlimb & 1) == (i & 1))
+ cmp_code = EQ_EXPR;
+ else
+ cmp_code = GT_EXPR;
+ g = gimple_build_cond (cmp_code, idx, size_int (startlimb),
+ NULL_TREE, NULL_TREE);
+ edge edge_true_true, edge_true_false, edge_false;
+ gimple *g2 = NULL;
+ if (!single_comparison)
+ g2 = gimple_build_cond (EQ_EXPR, idx,
+ size_int (startlimb), NULL_TREE,
+ NULL_TREE);
+ if_then_if_then_else (g, g2, profile_probability::likely (),
+ profile_probability::unlikely (),
+ edge_true_true, edge_true_false,
+ edge_false);
+ unsigned tidx = startlimb + (cmp_code == GT_EXPR);
+ tree l = arith_overflow_extract_bits (start, end, rhs, tidx,
+ check_zero);
+ tree this_ovf = make_ssa_name (boolean_type_node);
+ if (cmp_code != GT_EXPR && !check_zero)
+ {
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ PLUS_EXPR, l,
+ build_int_cst (m_limb_type, 1));
+ insert_before (g);
+ g = gimple_build_assign (this_ovf, GT_EXPR,
+ gimple_assign_lhs (g),
+ build_int_cst (m_limb_type, 1));
+ }
+ else
+ g = gimple_build_assign (this_ovf, NE_EXPR, l, cmp);
+ insert_before (g);
+ if (cmp_code == GT_EXPR)
+ {
+ tree t = make_ssa_name (boolean_type_node);
+ g = gimple_build_assign (t, BIT_IOR_EXPR, ovf, this_ovf);
+ insert_before (g);
+ this_ovf = t;
+ }
+ tree this_ovf2 = NULL_TREE;
+ if (!single_comparison)
+ {
+ m_gsi = gsi_after_labels (edge_true_true->src);
+ tree t = make_ssa_name (boolean_type_node);
+ g = gimple_build_assign (t, NE_EXPR, rhs, cmp);
+ insert_before (g);
+ this_ovf2 = make_ssa_name (boolean_type_node);
+ g = gimple_build_assign (this_ovf2, BIT_IOR_EXPR,
+ ovf, t);
+ insert_before (g);
+ }
+ m_gsi = gsi_after_labels (edge_true_false->dest);
+ tree t;
+ if (i == 1 && ovf_out)
+ t = ovf_out;
+ else
+ t = make_ssa_name (boolean_type_node);
+ gphi *phi = create_phi_node (t, edge_true_false->dest);
+ add_phi_arg (phi, this_ovf, edge_true_false,
+ UNKNOWN_LOCATION);
+ add_phi_arg (phi, ovf ? ovf
+ : boolean_false_node, edge_false,
+ UNKNOWN_LOCATION);
+ if (edge_true_true)
+ add_phi_arg (phi, this_ovf2, edge_true_true,
+ UNKNOWN_LOCATION);
+ ovf = t;
+ if (!check_zero && cmp_code != GT_EXPR)
+ {
+ t = cmp_out ? cmp_out : make_ssa_name (m_limb_type);
+ phi = create_phi_node (t, edge_true_false->dest);
+ add_phi_arg (phi, l, edge_true_false, UNKNOWN_LOCATION);
+ add_phi_arg (phi, cmp, edge_false, UNKNOWN_LOCATION);
+ if (edge_true_true)
+ add_phi_arg (phi, cmp, edge_true_true,
+ UNKNOWN_LOCATION);
+ cmp = t;
+ }
+ }
+ }
+ }
+
+ if (var || obj)
+ {
+ if (tree_fits_uhwi_p (idx) && tree_to_uhwi (idx) >= prec_limbs)
+ ;
+ else if (!tree_fits_uhwi_p (idx)
+ && (unsigned) prec < (fin - (i == 0)) * limb_prec)
+ {
+ bool single_comparison
+ = (((unsigned) prec % limb_prec) == 0
+ || prec_limbs + 1 >= fin
+ || (prec_limbs & 1) == (i & 1));
+ g = gimple_build_cond (LE_EXPR, idx, size_int (prec_limbs - 1),
+ NULL_TREE, NULL_TREE);
+ gimple *g2 = NULL;
+ if (!single_comparison)
+ g2 = gimple_build_cond (LT_EXPR, idx,
+ size_int (prec_limbs - 1),
+ NULL_TREE, NULL_TREE);
+ edge edge_true_true, edge_true_false, edge_false;
+ if_then_if_then_else (g, g2, profile_probability::likely (),
+ profile_probability::likely (),
+ edge_true_true, edge_true_false,
+ edge_false);
+ tree l = limb_access (type, var ? var : obj, idx, true);
+ g = gimple_build_assign (l, rhs);
+ insert_before (g);
+ if (!single_comparison)
+ {
+ m_gsi = gsi_after_labels (edge_true_true->src);
+ l = limb_access (type, var ? var : obj,
+ size_int (prec_limbs - 1), true);
+ if (!useless_type_conversion_p (TREE_TYPE (l),
+ TREE_TYPE (rhs)))
+ rhs = add_cast (TREE_TYPE (l), rhs);
+ g = gimple_build_assign (l, rhs);
+ insert_before (g);
+ }
+ m_gsi = gsi_after_labels (edge_true_false->dest);
+ }
+ else
+ {
+ tree l = limb_access (type, var ? var : obj, idx, true);
+ if (!useless_type_conversion_p (TREE_TYPE (l), TREE_TYPE (rhs)))
+ rhs = add_cast (TREE_TYPE (l), rhs);
+ g = gimple_build_assign (l, rhs);
+ insert_before (g);
+ }
+ }
+ m_first = false;
+ if (kind == bitint_prec_huge && i <= 1)
+ {
+ if (i == 0)
+ {
+ idx = make_ssa_name (sizetype);
+ g = gimple_build_assign (idx, PLUS_EXPR, idx_first,
+ size_one_node);
+ insert_before (g);
+ }
+ else
+ {
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx_first,
+ size_int (2));
+ insert_before (g);
+ g = gimple_build_cond (NE_EXPR, idx_next, size_int (fin),
+ NULL_TREE, NULL_TREE);
+ insert_before (g);
+ m_gsi = gsi_for_stmt (final_stmt);
+ }
+ }
+ }
+
+ finish_arith_overflow (var, obj, type, ovf, lhs, orig_obj, stmt, code);
+}
+
+/* Lower a .MUL_OVERFLOW call with at least one large/huge _BitInt
+ argument or return type _Complex large/huge _BitInt. */
+
+void
+bitint_large_huge::lower_mul_overflow (tree obj, gimple *stmt)
+{
+ tree arg0 = gimple_call_arg (stmt, 0);
+ tree arg1 = gimple_call_arg (stmt, 1);
+ tree lhs = gimple_call_lhs (stmt);
+ if (!lhs)
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ gsi_remove (&gsi, true);
+ return;
+ }
+ gimple *final_stmt = gsi_stmt (m_gsi);
+ tree type = TREE_TYPE (lhs);
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ type = TREE_TYPE (type);
+ int prec = TYPE_PRECISION (type), prec0, prec1;
+ arg0 = handle_operand_addr (arg0, stmt, NULL, &prec0);
+ arg1 = handle_operand_addr (arg1, stmt, NULL, &prec1);
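+ /* PREC2 bits are sufficient to hold the infinite precision product:
+ the sum of the operand precisions, plus one when exactly one of
+ the two ranges can be negative. */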
+ int prec2 = ((prec0 < 0 ? -prec0 : prec0)
+ + (prec1 < 0 ? -prec1 : prec1)
+ + ((prec0 < 0) != (prec1 < 0)));
+ tree var = NULL_TREE;
+ tree orig_obj = obj;
+ bool force_var = false;
+ if (obj == NULL_TREE
+ && TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) >= bitint_prec_large
+ && m_names
+ && bitmap_bit_p (m_names, SSA_NAME_VERSION (lhs)))
+ {
+ int part = var_to_partition (m_map, lhs);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ obj = m_vars[part];
+ if (TREE_TYPE (lhs) == type)
+ orig_obj = obj;
+ }
+ else if (obj != NULL_TREE && DECL_P (obj))
+ {
+ for (int i = 0; i < 2; ++i)
+ {
+ tree arg = i ? arg1 : arg0;
+ if (TREE_CODE (arg) == ADDR_EXPR)
+ arg = TREE_OPERAND (arg, 0);
+ if (get_base_address (arg) == obj)
+ {
+ force_var = true;
+ break;
+ }
+ }
+ }
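+ /* Decide whether the multiplication can store directly into OBJ or
+ needs a temporary array: a temporary is used when there is no
+ usable OBJ, when OBJ overlaps one of the operands, or when OBJ
+ does not provide room for all PREC2 bits needed for the overflow
+ check. */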
+ if (obj == NULL_TREE
+ || force_var
+ || TREE_CODE (type) != BITINT_TYPE
+ || bitint_precision_kind (type) < bitint_prec_large
+ || prec2 > (CEIL (prec, limb_prec) * limb_prec * (orig_obj ? 1 : 2)))
+ {
+ unsigned HOST_WIDE_INT nelts = CEIL (MAX (prec, prec2), limb_prec);
+ tree atype = build_array_type_nelts (m_limb_type, nelts);
+ var = create_tmp_var (atype);
+ }
+ tree addr = build_fold_addr_expr (var ? var : obj);
+ addr = force_gimple_operand_gsi (&m_gsi, addr, true,
+ NULL_TREE, true, GSI_SAME_STMT);
+ tree sitype = lang_hooks.types.type_for_mode (SImode, 0);
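+ /* Emit the multiplication as a call to the .MULBITINT internal
+ function (a _BitInt multiplication library helper): result address
+ and bit precision, followed by an address and precision pair for
+ each operand. */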
+ gimple *g
+ = gimple_build_call_internal (IFN_MULBITINT, 6,
+ addr, build_int_cst (sitype,
+ MAX (prec2, prec)),
+ arg0, build_int_cst (sitype, prec0),
+ arg1, build_int_cst (sitype, prec1));
+ insert_before (g);
+
+ unsigned start, end;
+ bool check_zero;
+ tree ovf = arith_overflow (MULT_EXPR, type, prec, prec0, prec1, prec2,
+ &start, &end, &check_zero);
+ if (ovf == NULL_TREE)
+ {
+ unsigned startlimb = start / limb_prec;
+ unsigned endlimb = (end - 1) / limb_prec;
+ unsigned cnt;
+ bool use_loop = false;
+ if (startlimb == endlimb)
+ cnt = 1;
+ else if (startlimb + 1 == endlimb)
+ cnt = 2;
+ else if ((end % limb_prec) == 0)
+ {
+ cnt = 2;
+ use_loop = true;
+ }
+ else
+ {
+ cnt = 3;
+ use_loop = startlimb + 2 < endlimb;
+ }
+ if (cnt == 1)
+ {
+ tree l = limb_access (NULL_TREE, var ? var : obj,
+ size_int (startlimb), true);
+ g = gimple_build_assign (make_ssa_name (m_limb_type), l);
+ insert_before (g);
+ l = arith_overflow_extract_bits (start, end, gimple_assign_lhs (g),
+ startlimb, check_zero);
+ ovf = make_ssa_name (boolean_type_node);
+ if (check_zero)
+ g = gimple_build_assign (ovf, NE_EXPR, l,
+ build_zero_cst (m_limb_type));
+ else
+ {
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ PLUS_EXPR, l,
+ build_int_cst (m_limb_type, 1));
+ insert_before (g);
+ g = gimple_build_assign (ovf, GT_EXPR, gimple_assign_lhs (g),
+ build_int_cst (m_limb_type, 1));
+ }
+ insert_before (g);
+ }
+ else
+ {
+ basic_block edge_bb = NULL;
+ gimple_stmt_iterator gsi = m_gsi;
+ gsi_prev (&gsi);
+ edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
+ edge_bb = e->src;
+ m_gsi = gsi_last_bb (edge_bb);
+ if (!gsi_end_p (m_gsi))
+ gsi_next (&m_gsi);
+
+ tree cmp = build_zero_cst (m_limb_type);
+ for (unsigned i = 0; i < cnt; i++)
+ {
+ tree idx, idx_next = NULL_TREE;
+ if (i == 0)
+ idx = size_int (startlimb);
+ else if (i == 2)
+ idx = size_int (endlimb);
+ else if (use_loop)
+ idx = create_loop (size_int (startlimb + 1), &idx_next);
+ else
+ idx = size_int (startlimb + 1);
+ tree l = limb_access (NULL_TREE, var ? var : obj, idx, true);
+ g = gimple_build_assign (make_ssa_name (m_limb_type), l);
+ insert_before (g);
+ l = gimple_assign_lhs (g);
+ if (i == 0 || i == 2)
+ l = arith_overflow_extract_bits (start, end, l,
+ tree_to_uhwi (idx),
+ check_zero);
+ if (i == 0 && !check_zero)
+ {
+ cmp = l;
+ g = gimple_build_assign (make_ssa_name (m_limb_type),
+ PLUS_EXPR, l,
+ build_int_cst (m_limb_type, 1));
+ insert_before (g);
+ g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
+ build_int_cst (m_limb_type, 1),
+ NULL_TREE, NULL_TREE);
+ }
+ else
+ g = gimple_build_cond (NE_EXPR, l, cmp, NULL_TREE, NULL_TREE);
+ insert_before (g);
+ edge e1 = split_block (gsi_bb (m_gsi), g);
+ e1->flags = EDGE_FALSE_VALUE;
+ edge e2 = make_edge (e1->src, gimple_bb (final_stmt),
+ EDGE_TRUE_VALUE);
+ e1->probability = profile_probability::likely ();
+ e2->probability = e1->probability.invert ();
+ if (i == 0)
+ set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
+ m_gsi = gsi_after_labels (e1->dest);
+ if (i == 1 && use_loop)
+ {
+ g = gimple_build_assign (idx_next, PLUS_EXPR, idx,
+ size_one_node);
+ insert_before (g);
+ g = gimple_build_cond (NE_EXPR, idx_next,
+ size_int (endlimb + (cnt == 1)),
+ NULL_TREE, NULL_TREE);
+ insert_before (g);
+ edge true_edge, false_edge;
+ extract_true_false_edges_from_block (gsi_bb (m_gsi),
+ &true_edge,
+ &false_edge);
+ m_gsi = gsi_after_labels (false_edge->dest);
+ }
+ }
+
+ ovf = make_ssa_name (boolean_type_node);
+ basic_block bb = gimple_bb (final_stmt);
+ gphi *phi = create_phi_node (ovf, bb);
+ edge e1 = find_edge (gsi_bb (m_gsi), bb);
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ tree val = e == e1 ? boolean_false_node : boolean_true_node;
+ add_phi_arg (phi, val, e, UNKNOWN_LOCATION);
+ }
+ m_gsi = gsi_for_stmt (final_stmt);
+ }
+ }
+
+ finish_arith_overflow (var, obj, type, ovf, lhs, orig_obj, stmt, MULT_EXPR);
+}
+
+/* Lower REALPART_EXPR or IMAGPART_EXPR stmt extracting part of result from
+ .{ADD,SUB,MUL}_OVERFLOW call. */
+
+void
+bitint_large_huge::lower_cplxpart_stmt (tree obj, gimple *stmt)
+{
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ rhs1 = TREE_OPERAND (rhs1, 0);
+ if (obj == NULL_TREE)
+ {
+ int part = var_to_partition (m_map, gimple_assign_lhs (stmt));
+ gcc_assert (m_vars[part] != NULL_TREE);
+ obj = m_vars[part];
+ }
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && (m_names == NULL
+ || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1))))
+ {
+ lower_call (obj, SSA_NAME_DEF_STMT (rhs1));
+ return;
+ }
+ int part = var_to_partition (m_map, rhs1);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ tree var = m_vars[part];
+ unsigned HOST_WIDE_INT nelts
+ = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (obj))) / limb_prec;
+ tree atype = build_array_type_nelts (m_limb_type, nelts);
+ if (!useless_type_conversion_p (atype, TREE_TYPE (obj)))
+ obj = build1 (VIEW_CONVERT_EXPR, atype, obj);
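+ /* VAR holds the whole _Complex value; copy either its first NELTS
+ limbs (REALPART_EXPR) or the following NELTS limbs (IMAGPART_EXPR)
+ into OBJ. */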
+ tree off = build_int_cst (build_pointer_type (TREE_TYPE (var)),
+ gimple_assign_rhs_code (stmt) == REALPART_EXPR
+ ? 0 : nelts * m_limb_size);
+ tree v2 = build2 (MEM_REF, atype, build_fold_addr_expr (var), off);
+ gimple *g = gimple_build_assign (obj, v2);
+ insert_before (g);
+}
+
+/* Lower COMPLEX_EXPR stmt. */
+
+void
+bitint_large_huge::lower_complexexpr_stmt (gimple *stmt)
+{
+ tree lhs = gimple_assign_lhs (stmt);
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree rhs2 = gimple_assign_rhs2 (stmt);
+ int part = var_to_partition (m_map, lhs);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ lhs = m_vars[part];
+ unsigned HOST_WIDE_INT nelts
+ = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (rhs1))) / limb_prec;
+ tree atype = build_array_type_nelts (m_limb_type, nelts);
+ tree zero = build_zero_cst (build_pointer_type (TREE_TYPE (lhs)));
+ tree v1 = build2 (MEM_REF, atype, build_fold_addr_expr (lhs), zero);
+ tree v2;
+ if (TREE_CODE (rhs1) == SSA_NAME)
+ {
+ part = var_to_partition (m_map, rhs1);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ v2 = m_vars[part];
+ }
+ else if (integer_zerop (rhs1))
+ v2 = build_zero_cst (atype);
+ else
+ v2 = tree_output_constant_def (rhs1);
+ if (!useless_type_conversion_p (atype, TREE_TYPE (v2)))
+ v2 = build1 (VIEW_CONVERT_EXPR, atype, v2);
+ gimple *g = gimple_build_assign (v1, v2);
+ insert_before (g);
+ tree off = fold_convert (build_pointer_type (TREE_TYPE (lhs)),
+ TYPE_SIZE_UNIT (atype));
+ v1 = build2 (MEM_REF, atype, build_fold_addr_expr (lhs), off);
+ if (TREE_CODE (rhs2) == SSA_NAME)
+ {
+ part = var_to_partition (m_map, rhs2);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ v2 = m_vars[part];
+ }
+ else if (integer_zerop (rhs2))
+ v2 = build_zero_cst (atype);
+ else
+ v2 = tree_output_constant_def (rhs2);
+ if (!useless_type_conversion_p (atype, TREE_TYPE (v2)))
+ v2 = build1 (VIEW_CONVERT_EXPR, atype, v2);
+ g = gimple_build_assign (v1, v2);
+ insert_before (g);
+}
+
+/* Lower a call statement with one or more large/huge _BitInt
+ arguments or large/huge _BitInt return value. */
+
+void
+bitint_large_huge::lower_call (tree obj, gimple *stmt)
+{
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ unsigned int nargs = gimple_call_num_args (stmt);
+ if (gimple_call_internal_p (stmt))
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_ADD_OVERFLOW:
+ case IFN_SUB_OVERFLOW:
+ case IFN_UBSAN_CHECK_ADD:
+ case IFN_UBSAN_CHECK_SUB:
+ lower_addsub_overflow (obj, stmt);
+ return;
+ case IFN_MUL_OVERFLOW:
+ case IFN_UBSAN_CHECK_MUL:
+ lower_mul_overflow (obj, stmt);
+ return;
+ default:
+ break;
+ }
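+ /* For other calls, load each large/huge _BitInt argument from its
+ partition variable into a fresh SSA_NAME just before the call and
+ record it in M_PRESERVED, and make a large/huge _BitInt lhs store
+ directly into its partition variable. */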
+ for (unsigned int i = 0; i < nargs; ++i)
+ {
+ tree arg = gimple_call_arg (stmt, i);
+ if (TREE_CODE (arg) != SSA_NAME
+ || TREE_CODE (TREE_TYPE (arg)) != BITINT_TYPE
+ || bitint_precision_kind (TREE_TYPE (arg)) <= bitint_prec_middle)
+ continue;
+ int p = var_to_partition (m_map, arg);
+ tree v = m_vars[p];
+ gcc_assert (v != NULL_TREE);
+ if (!types_compatible_p (TREE_TYPE (arg), TREE_TYPE (v)))
+ v = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (arg), v);
+ arg = make_ssa_name (TREE_TYPE (arg));
+ gimple *g = gimple_build_assign (arg, v);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_call_set_arg (stmt, i, arg);
+ if (m_preserved == NULL)
+ m_preserved = BITMAP_ALLOC (NULL);
+ bitmap_set_bit (m_preserved, SSA_NAME_VERSION (arg));
+ }
+ tree lhs = gimple_call_lhs (stmt);
+ if (lhs
+ && TREE_CODE (lhs) == SSA_NAME
+ && TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large)
+ {
+ int p = var_to_partition (m_map, lhs);
+ tree v = m_vars[p];
+ gcc_assert (v != NULL_TREE);
+ if (!types_compatible_p (TREE_TYPE (lhs), TREE_TYPE (v)))
+ v = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), v);
+ gimple_call_set_lhs (stmt, v);
+ SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
+ }
+ update_stmt (stmt);
+}
+
+/* Lower __asm STMT which involves large/huge _BitInt values. */
+
+void
+bitint_large_huge::lower_asm (gimple *stmt)
+{
+ gasm *g = as_a <gasm *> (stmt);
+ unsigned noutputs = gimple_asm_noutputs (g);
+ unsigned ninputs = gimple_asm_ninputs (g);
+
+ for (unsigned i = 0; i < noutputs; ++i)
+ {
+ tree t = gimple_asm_output_op (g, i);
+ tree s = TREE_VALUE (t);
+ if (TREE_CODE (s) == SSA_NAME
+ && TREE_CODE (TREE_TYPE (s)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (s)) >= bitint_prec_large)
+ {
+ int part = var_to_partition (m_map, s);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ TREE_VALUE (t) = m_vars[part];
+ }
+ }
+ for (unsigned i = 0; i < ninputs; ++i)
+ {
+ tree t = gimple_asm_input_op (g, i);
+ tree s = TREE_VALUE (t);
+ if (TREE_CODE (s) == SSA_NAME
+ && TREE_CODE (TREE_TYPE (s)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (s)) >= bitint_prec_large)
+ {
+ int part = var_to_partition (m_map, s);
+ gcc_assert (m_vars[part] != NULL_TREE);
+ TREE_VALUE (t) = m_vars[part];
+ }
+ }
+ update_stmt (stmt);
+}
+
+/* Lower statement STMT which involves large/huge _BitInt values
+ into code accessing individual limbs. */
+
+void
+bitint_large_huge::lower_stmt (gimple *stmt)
+{
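+ /* Initialize the per-statement lowering state. */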
+ m_first = true;
+ m_lhs = NULL_TREE;
+ m_data.truncate (0);
+ m_data_cnt = 0;
+ m_gsi = gsi_for_stmt (stmt);
+ m_after_stmt = NULL;
+ m_bb = NULL;
+ m_init_gsi = m_gsi;
+ gsi_prev (&m_init_gsi);
+ m_preheader_bb = NULL;
+ m_upwards_2limb = 0;
+ m_upwards = false;
+ m_var_msb = false;
+ m_cast_conditional = false;
+ m_bitfld_load = 0;
+ m_loc = gimple_location (stmt);
+ if (is_gimple_call (stmt))
+ {
+ lower_call (NULL_TREE, stmt);
+ return;
+ }
+ if (gimple_code (stmt) == GIMPLE_ASM)
+ {
+ lower_asm (stmt);
+ return;
+ }
+ tree lhs = NULL_TREE, cmp_op1 = NULL_TREE, cmp_op2 = NULL_TREE;
+ tree_code cmp_code = comparison_op (stmt, &cmp_op1, &cmp_op2);
+ bool eq_p = (cmp_code == EQ_EXPR || cmp_code == NE_EXPR);
+ bool mergeable_cast_p = false;
+ bool final_cast_p = false;
+ if (gimple_assign_cast_p (stmt))
+ {
+ lhs = gimple_assign_lhs (stmt);
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+ mergeable_cast_p = true;
+ else if (TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (rhs1)) >= bitint_prec_large
+ && INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
+ {
+ final_cast_p = true;
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && (m_names == NULL
+ || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1))))
+ {
+ gimple *g = SSA_NAME_DEF_STMT (rhs1);
+ if (is_gimple_assign (g)
+ && gimple_assign_rhs_code (g) == IMAGPART_EXPR)
+ {
+ tree rhs2 = TREE_OPERAND (gimple_assign_rhs1 (g), 0);
+ if (TREE_CODE (rhs2) == SSA_NAME
+ && (m_names == NULL
+ || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs2))))
+ {
+ g = SSA_NAME_DEF_STMT (rhs2);
+ int ovf = optimizable_arith_overflow (g);
+ if (ovf == 2)
+ /* If .{ADD,SUB,MUL}_OVERFLOW has both REALPART_EXPR
+ and IMAGPART_EXPR uses, where the latter is cast to
+ non-_BitInt, it will be optimized when handling
+ the REALPART_EXPR. */
+ return;
+ if (ovf == 1)
+ {
+ lower_call (NULL_TREE, g);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ if (gimple_store_p (stmt))
+ {
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && (m_names == NULL
+ || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1))))
+ {
+ gimple *g = SSA_NAME_DEF_STMT (rhs1);
+ m_loc = gimple_location (g);
+ lhs = gimple_assign_lhs (stmt);
+ if (is_gimple_assign (g) && !mergeable_op (g))
+ switch (gimple_assign_rhs_code (g))
+ {
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ lower_shift_stmt (lhs, g);
+ handled:
+ m_gsi = gsi_for_stmt (stmt);
+ unlink_stmt_vdef (stmt);
+ release_ssa_name (gimple_vdef (stmt));
+ gsi_remove (&m_gsi, true);
+ return;
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ lower_muldiv_stmt (lhs, g);
+ goto handled;
+ case FIX_TRUNC_EXPR:
+ lower_float_conv_stmt (lhs, g);
+ goto handled;
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ lower_cplxpart_stmt (lhs, g);
+ goto handled;
+ default:
+ break;
+ }
+ else if (optimizable_arith_overflow (g) == 3)
+ {
+ lower_call (lhs, g);
+ goto handled;
+ }
+ m_loc = gimple_location (stmt);
+ }
+ }
+ if (mergeable_op (stmt)
+ || gimple_store_p (stmt)
+ || gimple_assign_load_p (stmt)
+ || eq_p
+ || mergeable_cast_p)
+ {
+ lhs = lower_mergeable_stmt (stmt, cmp_code, cmp_op1, cmp_op2);
+ if (!eq_p)
+ return;
+ }
+ else if (cmp_code != ERROR_MARK)
+ lhs = lower_comparison_stmt (stmt, cmp_code, cmp_op1, cmp_op2);
+ if (cmp_code != ERROR_MARK)
+ {
+ if (gimple_code (stmt) == GIMPLE_COND)
+ {
+ gcond *cstmt = as_a <gcond *> (stmt);
+ gimple_cond_set_lhs (cstmt, lhs);
+ gimple_cond_set_rhs (cstmt, boolean_false_node);
+ gimple_cond_set_code (cstmt, cmp_code);
+ update_stmt (stmt);
+ return;
+ }
+ if (gimple_assign_rhs_code (stmt) == COND_EXPR)
+ {
+ tree cond = build2 (cmp_code, boolean_type_node, lhs,
+ boolean_false_node);
+ gimple_assign_set_rhs1 (stmt, cond);
+ lhs = gimple_assign_lhs (stmt);
+ gcc_assert (TREE_CODE (TREE_TYPE (lhs)) != BITINT_TYPE
+ || (bitint_precision_kind (TREE_TYPE (lhs))
+ <= bitint_prec_middle));
+ update_stmt (stmt);
+ return;
+ }
+ gimple_assign_set_rhs1 (stmt, lhs);
+ gimple_assign_set_rhs2 (stmt, boolean_false_node);
+ gimple_assign_set_rhs_code (stmt, cmp_code);
+ update_stmt (stmt);
+ return;
+ }
+ if (final_cast_p)
+ {
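+ /* A cast from a large/huge _BitInt to a small or middle integral
+ type: assemble the result from the low one or two limbs of the
+ operand. */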
+ tree lhs_type = TREE_TYPE (lhs);
+ /* Add support for 3 or more limbs filled in from normal integral
+ type if this assert fails. If no target chooses limb mode smaller
+ than half of largest supported normal integral type, this will not
+ be needed. */
+ gcc_assert (TYPE_PRECISION (lhs_type) <= 2 * limb_prec);
+ gimple *g;
+ if (TREE_CODE (lhs_type) == BITINT_TYPE
+ && bitint_precision_kind (lhs_type) == bitint_prec_middle)
+ lhs_type = build_nonstandard_integer_type (TYPE_PRECISION (lhs_type),
+ TYPE_UNSIGNED (lhs_type));
+ m_data_cnt = 0;
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree r1 = handle_operand (rhs1, size_int (0));
+ if (!useless_type_conversion_p (lhs_type, TREE_TYPE (r1)))
+ r1 = add_cast (lhs_type, r1);
+ if (TYPE_PRECISION (lhs_type) > limb_prec)
+ {
+ m_data_cnt = 0;
+ m_first = false;
+ tree r2 = handle_operand (rhs1, size_int (1));
+ r2 = add_cast (lhs_type, r2);
+ g = gimple_build_assign (make_ssa_name (lhs_type), LSHIFT_EXPR, r2,
+ build_int_cst (unsigned_type_node,
+ limb_prec));
+ insert_before (g);
+ g = gimple_build_assign (make_ssa_name (lhs_type), BIT_IOR_EXPR, r1,
+ gimple_assign_lhs (g));
+ insert_before (g);
+ r1 = gimple_assign_lhs (g);
+ }
+ if (lhs_type != TREE_TYPE (lhs))
+ g = gimple_build_assign (lhs, NOP_EXPR, r1);
+ else
+ g = gimple_build_assign (lhs, r1);
+ gsi_replace (&m_gsi, g, true);
+ return;
+ }
+ if (is_gimple_assign (stmt))
+ switch (gimple_assign_rhs_code (stmt))
+ {
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ lower_shift_stmt (NULL_TREE, stmt);
+ return;
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ lower_muldiv_stmt (NULL_TREE, stmt);
+ return;
+ case FIX_TRUNC_EXPR:
+ case FLOAT_EXPR:
+ lower_float_conv_stmt (NULL_TREE, stmt);
+ return;
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ lower_cplxpart_stmt (NULL_TREE, stmt);
+ return;
+ case COMPLEX_EXPR:
+ lower_complexexpr_stmt (stmt);
+ return;
+ default:
+ break;
+ }
+ gcc_unreachable ();
+}
+
+/* Helper for walk_non_aliased_vuses. Determine if we arrived at
+ the desired memory state. */
+
+void *
+vuse_eq (ao_ref *, tree vuse1, void *data)
+{
+ tree vuse2 = (tree) data;
+ if (vuse1 == vuse2)
+ return data;
+
+ return NULL;
+}
+
+/* Return true if STMT uses a library function and needs to take
+ address of its inputs. We need to avoid bit-fields in those
+ cases. */
+
+bool
+stmt_needs_operand_addr (gimple *stmt)
+{
+ if (is_gimple_assign (stmt))
+ switch (gimple_assign_rhs_code (stmt))
+ {
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ case FLOAT_EXPR:
+ return true;
+ default:
+ break;
+ }
+ else if (gimple_call_internal_p (stmt, IFN_MUL_OVERFLOW)
+ || gimple_call_internal_p (stmt, IFN_UBSAN_CHECK_MUL))
+ return true;
+ return false;
+}
+
+/* Dominator walker used to discover which large/huge _BitInt
+ loads could be sunk into all their uses. */
+
+class bitint_dom_walker : public dom_walker
+{
+public:
+ bitint_dom_walker (bitmap names, bitmap loads)
+ : dom_walker (CDI_DOMINATORS), m_names (names), m_loads (loads) {}
+
+ edge before_dom_children (basic_block) final override;
+
+private:
+ bitmap m_names, m_loads;
+};
+
+edge
+bitint_dom_walker::before_dom_children (basic_block bb)
+{
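+ /* Determine the virtual operand live on entry to BB: the result of
+ its virtual PHI if any, otherwise the one recorded in the
+ immediate dominator's aux field (NULL_TREE for the entry block). */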
+ gphi *phi = get_virtual_phi (bb);
+ tree vop;
+ if (phi)
+ vop = gimple_phi_result (phi);
+ else if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ vop = NULL_TREE;
+ else
+ vop = (tree) get_immediate_dominator (CDI_DOMINATORS, bb)->aux;
+
+ auto_vec<tree, 16> worklist;
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ if (is_gimple_debug (stmt))
+ continue;
+
+ if (!vop && gimple_vuse (stmt))
+ vop = gimple_vuse (stmt);
+
+ tree cvop = vop;
+ if (gimple_vdef (stmt))
+ vop = gimple_vdef (stmt);
+
+ tree lhs = gimple_get_lhs (stmt);
+ if (lhs
+ && TREE_CODE (lhs) == SSA_NAME
+ && TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large
+ && !bitmap_bit_p (m_names, SSA_NAME_VERSION (lhs)))
+ /* If lhs of stmt is large/huge _BitInt SSA_NAME not in m_names,
+ it means it will be handled in a loop or straight line code
+ at the location of its (ultimate) immediate use, so for
+ vop checking purposes check these only at the ultimate
+ immediate use. */
+ continue;
+
+ ssa_op_iter oi;
+ use_operand_p use_p;
+ FOR_EACH_SSA_USE_OPERAND (use_p, stmt, oi, SSA_OP_USE)
+ {
+ tree s = USE_FROM_PTR (use_p);
+ if (TREE_CODE (TREE_TYPE (s)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (s)) >= bitint_prec_large)
+ worklist.safe_push (s);
+ }
+
+ bool needs_operand_addr = stmt_needs_operand_addr (stmt);
+ while (worklist.length () > 0)
+ {
+ tree s = worklist.pop ();
+
+ if (!bitmap_bit_p (m_names, SSA_NAME_VERSION (s)))
+ {
+ gimple *g = SSA_NAME_DEF_STMT (s);
+ needs_operand_addr |= stmt_needs_operand_addr (g);
+ FOR_EACH_SSA_USE_OPERAND (use_p, g, oi, SSA_OP_USE)
+ {
+ tree s2 = USE_FROM_PTR (use_p);
+ if (TREE_CODE (TREE_TYPE (s2)) == BITINT_TYPE
+ && (bitint_precision_kind (TREE_TYPE (s2))
+ >= bitint_prec_large))
+ worklist.safe_push (s2);
+ }
+ continue;
+ }
+ if (!SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s)
+ && gimple_assign_cast_p (SSA_NAME_DEF_STMT (s)))
+ {
+ tree rhs = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (s));
+ if (TREE_CODE (rhs) == SSA_NAME
+ && bitmap_bit_p (m_loads, SSA_NAME_VERSION (rhs)))
+ s = rhs;
+ else
+ continue;
+ }
+ else if (!bitmap_bit_p (m_loads, SSA_NAME_VERSION (s)))
+ continue;
+
+ tree rhs1 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (s));
+ if (needs_operand_addr
+ && TREE_CODE (rhs1) == COMPONENT_REF
+ && DECL_BIT_FIELD_TYPE (TREE_OPERAND (rhs1, 1)))
+ {
+ tree fld = TREE_OPERAND (rhs1, 1);
+ /* For little-endian, we can allow as inputs bit-fields
+ which start at a limb boundary. */
+ if (DECL_OFFSET_ALIGN (fld) >= TYPE_ALIGN (TREE_TYPE (rhs1))
+ && tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld))
+ && (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld))
+ % limb_prec) == 0)
+ ;
+ else
+ {
+ bitmap_clear_bit (m_loads, SSA_NAME_VERSION (s));
+ continue;
+ }
+ }
+
+ ao_ref ref;
+ ao_ref_init (&ref, rhs1);
+ tree lvop = gimple_vuse (SSA_NAME_DEF_STMT (s));
+ unsigned limit = 64;
+ tree vuse = cvop;
+ if (vop != cvop
+ && is_gimple_assign (stmt)
+ && gimple_store_p (stmt)
+ && !operand_equal_p (lhs,
+ gimple_assign_rhs1 (SSA_NAME_DEF_STMT (s)),
+ 0))
+ vuse = vop;
+ if (vuse != lvop
+ && walk_non_aliased_vuses (&ref, vuse, false, vuse_eq,
+ NULL, NULL, limit, lvop) == NULL)
+ bitmap_clear_bit (m_loads, SSA_NAME_VERSION (s));
+ }
+ }
+
+ bb->aux = (void *) vop;
+ return NULL;
+}
+
+}
+
+/* Replacement for normal processing of STMT in tree-ssa-coalesce.cc
+ build_ssa_conflict_graph.
+ The differences are:
+ 1) don't process assignments with large/huge _BitInt lhs not in NAMES
+ 2) for large/huge _BitInt multiplication/division/modulo process def
+ only after processing uses rather than before to make uses conflict
+ with the definition
+ 3) for large/huge _BitInt uses not in NAMES mark the uses of their
+ SSA_NAME_DEF_STMT (recursively), because those uses will be sunk into
+ the final statement. */
+
+void
+build_bitint_stmt_ssa_conflicts (gimple *stmt, live_track *live,
+ ssa_conflicts *graph, bitmap names,
+ void (*def) (live_track *, tree,
+ ssa_conflicts *),
+ void (*use) (live_track *, tree))
+{
+ bool muldiv_p = false;
+ tree lhs = NULL_TREE;
+ if (is_gimple_assign (stmt))
+ {
+ lhs = gimple_assign_lhs (stmt);
+ if (TREE_CODE (lhs) == SSA_NAME
+ && TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large)
+ {
+ if (!bitmap_bit_p (names, SSA_NAME_VERSION (lhs)))
+ return;
+ switch (gimple_assign_rhs_code (stmt))
+ {
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ muldiv_p = true;
+ default:
+ break;
+ }
+ }
+ }
+
+ ssa_op_iter iter;
+ tree var;
+ if (!muldiv_p)
+ {
+ /* For stmts with more than one SSA_NAME definition pretend all the
+ SSA_NAME outputs but the first one are live at this point, so
+ that conflicts are added in between all those even when they are
+ actually not really live after the asm, because expansion might
+ copy those into pseudos after the asm and if multiple outputs
+ share the same partition, it might overwrite those that should
+ be live. E.g.
+ asm volatile (".." : "=r" (a) : "=r" (b) : "0" (a), "1" (a));
+ return a;
+ See PR70593. */
+ bool first = true;
+ FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_DEF)
+ if (first)
+ first = false;
+ else
+ use (live, var);
+
+ FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_DEF)
+ def (live, var, graph);
+ }
+
+ auto_vec<tree, 16> worklist;
+ FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_USE)
+ if (TREE_CODE (TREE_TYPE (var)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (var)) >= bitint_prec_large)
+ {
+ if (bitmap_bit_p (names, SSA_NAME_VERSION (var)))
+ use (live, var);
+ else
+ worklist.safe_push (var);
+ }
+
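+ /* Uses not in NAMES will be sunk into their ultimate use, so
+ recursively treat the uses of their defining statements as if
+ they occurred on this statement (see point 3 above). */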
+ while (worklist.length () > 0)
+ {
+ tree s = worklist.pop ();
+ FOR_EACH_SSA_TREE_OPERAND (var, SSA_NAME_DEF_STMT (s), iter, SSA_OP_USE)
+ if (TREE_CODE (TREE_TYPE (var)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (var)) >= bitint_prec_large)
+ {
+ if (bitmap_bit_p (names, SSA_NAME_VERSION (var)))
+ use (live, var);
+ else
+ worklist.safe_push (var);
+ }
+ }
+
+ if (muldiv_p)
+ def (live, lhs, graph);
+}
+
+/* Entry point for _BitInt(N) operation lowering during optimization. */
+
+static unsigned int
+gimple_lower_bitint (void)
+{
+ small_max_prec = mid_min_prec = large_min_prec = huge_min_prec = 0;
+ limb_prec = 0;
+
+ unsigned int i;
+ for (i = 0; i < num_ssa_names; ++i)
+ {
+ tree s = ssa_name (i);
+ if (s == NULL)
+ continue;
+ tree type = TREE_TYPE (s);
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ type = TREE_TYPE (type);
+ if (TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) != bitint_prec_small)
+ break;
+ /* We need to also rewrite stores of large/huge _BitInt INTEGER_CSTs
+ into memory. Such functions could have no large/huge SSA_NAMEs. */
+ if (SSA_NAME_IS_VIRTUAL_OPERAND (s))
+ {
+ gimple *g = SSA_NAME_DEF_STMT (s);
+ if (is_gimple_assign (g) && gimple_store_p (g))
+ {
+ tree t = gimple_assign_rhs1 (g);
+ if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
+ && (bitint_precision_kind (TREE_TYPE (t))
+ >= bitint_prec_large))
+ break;
+ }
+ }
+ }
+ if (i == num_ssa_names)
+ return 0;
+
+ basic_block bb;
+ auto_vec<gimple *, 4> switch_statements;
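+ /* Collect switches with a large/huge _BitInt index; they are
+ expanded into decision trees of comparisons below, before the
+ actual limb lowering. */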
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ if (gswitch *swtch = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb)))
+ {
+ tree idx = gimple_switch_index (swtch);
+ if (TREE_CODE (TREE_TYPE (idx)) != BITINT_TYPE
+ || bitint_precision_kind (TREE_TYPE (idx)) < bitint_prec_large)
+ continue;
+
+ if (optimize)
+ group_case_labels_stmt (swtch);
+ switch_statements.safe_push (swtch);
+ }
+ }
+
+ if (!switch_statements.is_empty ())
+ {
+ bool expanded = false;
+ gimple *stmt;
+ unsigned int j;
+ i = 0;
+ FOR_EACH_VEC_ELT (switch_statements, j, stmt)
+ {
+ gswitch *swtch = as_a<gswitch *> (stmt);
+ tree_switch_conversion::switch_decision_tree dt (swtch);
+ expanded |= dt.analyze_switch_statement ();
+ }
+
+ if (expanded)
+ {
+ free_dominance_info (CDI_DOMINATORS);
+ free_dominance_info (CDI_POST_DOMINATORS);
+ mark_virtual_operands_for_renaming (cfun);
+ cleanup_tree_cfg (TODO_update_ssa);
+ }
+ }
+
+ struct bitint_large_huge large_huge;
+ bool has_large_huge_parm_result = false;
+ bool has_large_huge = false;
+ unsigned int ret = 0, first_large_huge = ~0U;
+ bool edge_insertions = false;
+ for (; i < num_ssa_names; ++i)
+ {
+ tree s = ssa_name (i);
+ if (s == NULL)
+ continue;
+ tree type = TREE_TYPE (s);
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ type = TREE_TYPE (type);
+ if (TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) >= bitint_prec_large)
+ {
+ if (first_large_huge == ~0U)
+ first_large_huge = i;
+ gimple *stmt = SSA_NAME_DEF_STMT (s), *g;
+ gimple_stmt_iterator gsi;
+ tree_code rhs_code;
+ /* Unoptimize certain constructs to simpler alternatives to
+ avoid having to lower all of them. */
+ if (is_gimple_assign (stmt))
+ switch (rhs_code = gimple_assign_rhs_code (stmt))
+ {
+ default:
+ break;
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ {
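+ /* Rotates are not lowered directly; rewrite X r<< N (and r>>)
+ as shifts in the corresponding unsigned type, i.e. for
+ LROTATE_EXPR roughly (X << N) | (X >> (PREC - N)). */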
+ first_large_huge = 0;
+ location_t loc = gimple_location (stmt);
+ gsi = gsi_for_stmt (stmt);
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ tree type = TREE_TYPE (rhs1);
+ tree n = gimple_assign_rhs2 (stmt), m;
+ tree p = build_int_cst (TREE_TYPE (n),
+ TYPE_PRECISION (type));
+ if (TREE_CODE (n) == INTEGER_CST)
+ m = fold_build2 (MINUS_EXPR, TREE_TYPE (n), p, n);
+ else
+ {
+ m = make_ssa_name (TREE_TYPE (n));
+ g = gimple_build_assign (m, MINUS_EXPR, p, n);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_set_location (g, loc);
+ }
+ if (!TYPE_UNSIGNED (type))
+ {
+ tree utype = build_bitint_type (TYPE_PRECISION (type),
+ 1);
+ if (TREE_CODE (rhs1) == INTEGER_CST)
+ rhs1 = fold_convert (utype, rhs1);
+ else
+ {
+ tree t = make_ssa_name (utype);
+ g = gimple_build_assign (t, NOP_EXPR, rhs1);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_set_location (g, loc);
+ rhs1 = t;
+ }
+ }
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs1)),
+ rhs_code == LROTATE_EXPR
+ ? LSHIFT_EXPR : RSHIFT_EXPR,
+ rhs1, n);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_set_location (g, loc);
+ tree op1 = gimple_assign_lhs (g);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs1)),
+ rhs_code == LROTATE_EXPR
+ ? RSHIFT_EXPR : LSHIFT_EXPR,
+ rhs1, m);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_set_location (g, loc);
+ tree op2 = gimple_assign_lhs (g);
+ tree lhs = gimple_assign_lhs (stmt);
+ if (!TYPE_UNSIGNED (type))
+ {
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (op1)),
+ BIT_IOR_EXPR, op1, op2);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_set_location (g, loc);
+ g = gimple_build_assign (lhs, NOP_EXPR,
+ gimple_assign_lhs (g));
+ }
+ else
+ g = gimple_build_assign (lhs, BIT_IOR_EXPR, op1, op2);
+ gsi_replace (&gsi, g, true);
+ gimple_set_location (g, loc);
+ }
+ break;
+ case ABS_EXPR:
+ case ABSU_EXPR:
+ case MIN_EXPR:
+ case MAX_EXPR:
+ case COND_EXPR:
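+ /* Likewise ABS{,U}_EXPR, MIN/MAX_EXPR and COND_EXPR are rewritten
+ here using an explicit conditional jump and a PHI node selecting
+ between the two arms, rather than being lowered directly. */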
+ first_large_huge = 0;
+ gsi = gsi_for_stmt (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree rhs1 = gimple_assign_rhs1 (stmt), rhs2 = NULL_TREE;
+ location_t loc = gimple_location (stmt);
+ if (rhs_code == ABS_EXPR)
+ g = gimple_build_cond (LT_EXPR, rhs1,
+ build_zero_cst (TREE_TYPE (rhs1)),
+ NULL_TREE, NULL_TREE);
+ else if (rhs_code == ABSU_EXPR)
+ {
+ rhs2 = make_ssa_name (TREE_TYPE (lhs));
+ g = gimple_build_assign (rhs2, NOP_EXPR, rhs1);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_set_location (g, loc);
+ g = gimple_build_cond (LT_EXPR, rhs1,
+ build_zero_cst (TREE_TYPE (rhs1)),
+ NULL_TREE, NULL_TREE);
+ rhs1 = rhs2;
+ }
+ else if (rhs_code == MIN_EXPR || rhs_code == MAX_EXPR)
+ {
+ rhs2 = gimple_assign_rhs2 (stmt);
+ if (TREE_CODE (rhs1) == INTEGER_CST)
+ std::swap (rhs1, rhs2);
+ g = gimple_build_cond (LT_EXPR, rhs1, rhs2,
+ NULL_TREE, NULL_TREE);
+ if (rhs_code == MAX_EXPR)
+ std::swap (rhs1, rhs2);
+ }
+ else
+ {
+ g = gimple_build_cond (NE_EXPR, rhs1,
+ build_zero_cst (TREE_TYPE (rhs1)),
+ NULL_TREE, NULL_TREE);
+ rhs1 = gimple_assign_rhs2 (stmt);
+ rhs2 = gimple_assign_rhs3 (stmt);
+ }
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_set_location (g, loc);
+ edge e1 = split_block (gsi_bb (gsi), g);
+ edge e2 = split_block (e1->dest, (gimple *) NULL);
+ edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
+ e3->probability = profile_probability::even ();
+ e1->flags = EDGE_TRUE_VALUE;
+ e1->probability = e3->probability.invert ();
+ if (dom_info_available_p (CDI_DOMINATORS))
+ set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
+ if (rhs_code == ABS_EXPR || rhs_code == ABSU_EXPR)
+ {
+ gsi = gsi_after_labels (e1->dest);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs1)),
+ NEGATE_EXPR, rhs1);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gimple_set_location (g, loc);
+ rhs2 = gimple_assign_lhs (g);
+ std::swap (rhs1, rhs2);
+ }
+ gsi = gsi_for_stmt (stmt);
+ gsi_remove (&gsi, true);
+ gphi *phi = create_phi_node (lhs, e2->dest);
+ add_phi_arg (phi, rhs1, e2, UNKNOWN_LOCATION);
+ add_phi_arg (phi, rhs2, e3, UNKNOWN_LOCATION);
+ break;
+ }
+ }
+ /* We need to also rewrite stores of large/huge _BitInt INTEGER_CSTs
+ into memory. Such functions could have no large/huge SSA_NAMEs. */
+ else if (SSA_NAME_IS_VIRTUAL_OPERAND (s))
+ {
+ gimple *g = SSA_NAME_DEF_STMT (s);
+ if (is_gimple_assign (g) && gimple_store_p (g))
+ {
+ tree t = gimple_assign_rhs1 (g);
+ if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
+ && (bitint_precision_kind (TREE_TYPE (t))
+ >= bitint_prec_large))
+ has_large_huge = true;
+ }
+ }
+ }
+ for (i = first_large_huge; i < num_ssa_names; ++i)
+ {
+ tree s = ssa_name (i);
+ if (s == NULL)
+ continue;
+ tree type = TREE_TYPE (s);
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ type = TREE_TYPE (type);
+ if (TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) >= bitint_prec_large)
+ {
+ use_operand_p use_p;
+ gimple *use_stmt;
+ has_large_huge = true;
+ if (optimize
+ && optimizable_arith_overflow (SSA_NAME_DEF_STMT (s)))
+ continue;
+ /* Ignore large/huge _BitInt SSA_NAMEs which have a single use in
+ the same bb and could be handled in the same loop as their
+ immediate use. */
+ if (optimize
+ && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s)
+ && single_imm_use (s, &use_p, &use_stmt)
+ && gimple_bb (SSA_NAME_DEF_STMT (s)) == gimple_bb (use_stmt))
+ {
+ if (mergeable_op (SSA_NAME_DEF_STMT (s)))
+ {
+ if (mergeable_op (use_stmt))
+ continue;
+ tree_code cmp_code = comparison_op (use_stmt, NULL, NULL);
+ if (cmp_code == EQ_EXPR || cmp_code == NE_EXPR)
+ continue;
+ if (gimple_assign_cast_p (use_stmt))
+ {
+ tree lhs = gimple_assign_lhs (use_stmt);
+ if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
+ continue;
+ }
+ else if (gimple_store_p (use_stmt)
+ && is_gimple_assign (use_stmt)
+ && !gimple_has_volatile_ops (use_stmt)
+ && !stmt_ends_bb_p (use_stmt))
+ continue;
+ }
+ if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (s)))
+ {
+ tree rhs1 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (s));
+ if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+ && ((is_gimple_assign (use_stmt)
+ && (gimple_assign_rhs_code (use_stmt)
+ != COMPLEX_EXPR))
+ || gimple_code (use_stmt) == GIMPLE_COND)
+ && (!gimple_store_p (use_stmt)
+ || (is_gimple_assign (use_stmt)
+ && !gimple_has_volatile_ops (use_stmt)
+ && !stmt_ends_bb_p (use_stmt)))
+ && (TREE_CODE (rhs1) != SSA_NAME
+ || !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1)))
+ {
+ if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
+ || (bitint_precision_kind (TREE_TYPE (rhs1))
+ < bitint_prec_large)
+ || (TYPE_PRECISION (TREE_TYPE (rhs1))
+ >= TYPE_PRECISION (TREE_TYPE (s)))
+ || mergeable_op (SSA_NAME_DEF_STMT (s)))
+ continue;
+ /* Prevent merging a widening non-mergeable cast
+ on result of some narrower mergeable op
+ together with later mergeable operations. E.g.
+ result of _BitInt(223) addition shouldn't be
+ sign-extended to _BitInt(513) and have another
+ _BitInt(513) added to it, as handle_plus_minus
+ with its PHI node handling inside of handle_cast
+ will not work correctly. An exception is if
+ use_stmt is a store, this is handled directly
+ in lower_mergeable_stmt. */
+ if (TREE_CODE (rhs1) != SSA_NAME
+ || !has_single_use (rhs1)
+ || (gimple_bb (SSA_NAME_DEF_STMT (rhs1))
+ != gimple_bb (SSA_NAME_DEF_STMT (s)))
+ || !mergeable_op (SSA_NAME_DEF_STMT (rhs1))
+ || gimple_store_p (use_stmt))
+ continue;
+ if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (rhs1)))
+ {
+ /* Another exception is if the widening cast is
+ from mergeable same precision cast from something
+ not mergeable. */
+ tree rhs2
+ = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (rhs1));
+ if (TREE_CODE (TREE_TYPE (rhs2)) == BITINT_TYPE
+ && (TYPE_PRECISION (TREE_TYPE (rhs1))
+ == TYPE_PRECISION (TREE_TYPE (rhs2))))
+ {
+ if (TREE_CODE (rhs2) != SSA_NAME
+ || !has_single_use (rhs2)
+ || (gimple_bb (SSA_NAME_DEF_STMT (rhs2))
+ != gimple_bb (SSA_NAME_DEF_STMT (s)))
+ || !mergeable_op (SSA_NAME_DEF_STMT (rhs2)))
+ continue;
+ }
+ }
+ }
+ }
+ if (is_gimple_assign (SSA_NAME_DEF_STMT (s)))
+ switch (gimple_assign_rhs_code (SSA_NAME_DEF_STMT (s)))
+ {
+ case IMAGPART_EXPR:
+ {
+ tree rhs1 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (s));
+ rhs1 = TREE_OPERAND (rhs1, 0);
+ if (TREE_CODE (rhs1) == SSA_NAME)
+ {
+ gimple *g = SSA_NAME_DEF_STMT (rhs1);
+ if (optimizable_arith_overflow (g))
+ continue;
+ }
+ }
+ /* FALLTHRU */
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ case FIX_TRUNC_EXPR:
+ case REALPART_EXPR:
+ if (gimple_store_p (use_stmt)
+ && is_gimple_assign (use_stmt)
+ && !gimple_has_volatile_ops (use_stmt)
+ && !stmt_ends_bb_p (use_stmt))
+ {
+ tree lhs = gimple_assign_lhs (use_stmt);
+ /* As multiply/division passes address of the lhs
+ to library function and that assumes it can extend
+ it to whole number of limbs, avoid merging those
+ with bit-field stores. Don't allow it for
+ shifts etc. either, so that the bit-field store
+ handling doesn't have to be done everywhere. */
+ if (TREE_CODE (lhs) == COMPONENT_REF
+ && DECL_BIT_FIELD_TYPE (TREE_OPERAND (lhs, 1)))
+ break;
+ continue;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Also ignore uninitialized uses. */
+ if (SSA_NAME_IS_DEFAULT_DEF (s)
+ && (!SSA_NAME_VAR (s) || VAR_P (SSA_NAME_VAR (s))))
+ continue;
+
+ if (!large_huge.m_names)
+ large_huge.m_names = BITMAP_ALLOC (NULL);
+ bitmap_set_bit (large_huge.m_names, SSA_NAME_VERSION (s));
+ if (has_single_use (s))
+ {
+ if (!large_huge.m_single_use_names)
+ large_huge.m_single_use_names = BITMAP_ALLOC (NULL);
+ bitmap_set_bit (large_huge.m_single_use_names,
+ SSA_NAME_VERSION (s));
+ }
+ if (SSA_NAME_VAR (s)
+ && ((TREE_CODE (SSA_NAME_VAR (s)) == PARM_DECL
+ && SSA_NAME_IS_DEFAULT_DEF (s))
+ || TREE_CODE (SSA_NAME_VAR (s)) == RESULT_DECL))
+ has_large_huge_parm_result = true;
+ if (optimize
+ && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s)
+ && gimple_assign_load_p (SSA_NAME_DEF_STMT (s))
+ && !gimple_has_volatile_ops (SSA_NAME_DEF_STMT (s))
+ && !stmt_ends_bb_p (SSA_NAME_DEF_STMT (s)))
+ {
+ use_operand_p use_p;
+ imm_use_iterator iter;
+ bool optimizable_load = true;
+ FOR_EACH_IMM_USE_FAST (use_p, iter, s)
+ {
+ gimple *use_stmt = USE_STMT (use_p);
+ if (is_gimple_debug (use_stmt))
+ continue;
+ if (gimple_code (use_stmt) == GIMPLE_PHI
+ || is_gimple_call (use_stmt))
+ {
+ optimizable_load = false;
+ break;
+ }
+ }
+
+ ssa_op_iter oi;
+ FOR_EACH_SSA_USE_OPERAND (use_p, SSA_NAME_DEF_STMT (s),
+ oi, SSA_OP_USE)
+ {
+ tree s2 = USE_FROM_PTR (use_p);
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s2))
+ {
+ optimizable_load = false;
+ break;
+ }
+ }
+
+ if (optimizable_load && !stmt_ends_bb_p (SSA_NAME_DEF_STMT (s)))
+ {
+ if (!large_huge.m_loads)
+ large_huge.m_loads = BITMAP_ALLOC (NULL);
+ bitmap_set_bit (large_huge.m_loads, SSA_NAME_VERSION (s));
+ }
+ }
+ }
+ /* We need to also rewrite stores of large/huge _BitInt INTEGER_CSTs
+ into memory. Such functions could have no large/huge SSA_NAMEs. */
+ else if (SSA_NAME_IS_VIRTUAL_OPERAND (s))
+ {
+ gimple *g = SSA_NAME_DEF_STMT (s);
+ if (is_gimple_assign (g) && gimple_store_p (g))
+ {
+ tree t = gimple_assign_rhs1 (g);
+ if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
+ && bitint_precision_kind (TREE_TYPE (t)) >= bitint_prec_large)
+ has_large_huge = true;
+ }
+ }
+ }
+
+ if (large_huge.m_names || has_large_huge)
+ {
+ ret = TODO_update_ssa_only_virtuals | TODO_cleanup_cfg;
+ calculate_dominance_info (CDI_DOMINATORS);
+ if (optimize)
+ enable_ranger (cfun);
+ if (large_huge.m_loads)
+ {
+ basic_block entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);
+ entry->aux = NULL;
+ bitint_dom_walker (large_huge.m_names,
+ large_huge.m_loads).walk (entry);
+ bitmap_and_compl_into (large_huge.m_names, large_huge.m_loads);
+ clear_aux_for_blocks ();
+ BITMAP_FREE (large_huge.m_loads);
+ }
+ large_huge.m_limb_type = build_nonstandard_integer_type (limb_prec, 1);
+ large_huge.m_limb_size
+ = tree_to_uhwi (TYPE_SIZE_UNIT (large_huge.m_limb_type));
+ }
+ if (large_huge.m_names)
+ {
+ large_huge.m_map
+ = init_var_map (num_ssa_names, NULL, large_huge.m_names);
+ coalesce_ssa_name (large_huge.m_map);
+ partition_view_normal (large_huge.m_map);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "After Coalescing:\n");
+ dump_var_map (dump_file, large_huge.m_map);
+ }
+ large_huge.m_vars
+ = XCNEWVEC (tree, num_var_partitions (large_huge.m_map));
+ bitmap_iterator bi;
+ if (has_large_huge_parm_result)
+ EXECUTE_IF_SET_IN_BITMAP (large_huge.m_names, 0, i, bi)
+ {
+ tree s = ssa_name (i);
+ if (SSA_NAME_VAR (s)
+ && ((TREE_CODE (SSA_NAME_VAR (s)) == PARM_DECL
+ && SSA_NAME_IS_DEFAULT_DEF (s))
+ || TREE_CODE (SSA_NAME_VAR (s)) == RESULT_DECL))
+ {
+ int p = var_to_partition (large_huge.m_map, s);
+ if (large_huge.m_vars[p] == NULL_TREE)
+ {
+ large_huge.m_vars[p] = SSA_NAME_VAR (s);
+ mark_addressable (SSA_NAME_VAR (s));
+ }
+ }
+ }
+ tree atype = NULL_TREE;
+ EXECUTE_IF_SET_IN_BITMAP (large_huge.m_names, 0, i, bi)
+ {
+ tree s = ssa_name (i);
+ int p = var_to_partition (large_huge.m_map, s);
+ if (large_huge.m_vars[p] != NULL_TREE)
+ continue;
+ if (atype == NULL_TREE
+ || !tree_int_cst_equal (TYPE_SIZE (atype),
+ TYPE_SIZE (TREE_TYPE (s))))
+ {
+ unsigned HOST_WIDE_INT nelts
+ = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (s))) / limb_prec;
+ atype = build_array_type_nelts (large_huge.m_limb_type, nelts);
+ }
+ large_huge.m_vars[p] = create_tmp_var (atype, "bitint");
+ mark_addressable (large_huge.m_vars[p]);
+ }
+ }
+
+ FOR_EACH_BB_REVERSE_FN (bb, cfun)
+ {
+ gimple_stmt_iterator prev;
+ for (gimple_stmt_iterator gsi = gsi_last_bb (bb); !gsi_end_p (gsi);
+ gsi = prev)
+ {
+ prev = gsi;
+ gsi_prev (&prev);
+ ssa_op_iter iter;
+ gimple *stmt = gsi_stmt (gsi);
+ if (is_gimple_debug (stmt))
+ continue;
+ bitint_prec_kind kind = bitint_prec_small;
+ tree t;
+ FOR_EACH_SSA_TREE_OPERAND (t, stmt, iter, SSA_OP_ALL_OPERANDS)
+ if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE)
+ {
+ bitint_prec_kind this_kind
+ = bitint_precision_kind (TREE_TYPE (t));
+ if (this_kind > kind)
+ kind = this_kind;
+ }
+ if (is_gimple_assign (stmt) && gimple_store_p (stmt))
+ {
+ t = gimple_assign_rhs1 (stmt);
+ if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE)
+ {
+ bitint_prec_kind this_kind
+ = bitint_precision_kind (TREE_TYPE (t));
+ if (this_kind > kind)
+ kind = this_kind;
+ }
+ }
+ if (is_gimple_call (stmt))
+ {
+ t = gimple_call_lhs (stmt);
+ if (t
+ && TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (t))) == BITINT_TYPE)
+ {
+ bitint_prec_kind this_kind
+ = bitint_precision_kind (TREE_TYPE (TREE_TYPE (t)));
+ if (this_kind > kind)
+ kind = this_kind;
+ }
+ }
+ if (kind == bitint_prec_small)
+ continue;
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_CALL:
+ /* For now. We'll need to handle some internal functions and
+ perhaps some builtins. */
+ if (kind == bitint_prec_middle)
+ continue;
+ break;
+ case GIMPLE_ASM:
+ if (kind == bitint_prec_middle)
+ continue;
+ break;
+ case GIMPLE_RETURN:
+ continue;
+ case GIMPLE_ASSIGN:
+ if (gimple_clobber_p (stmt))
+ continue;
+ if (kind >= bitint_prec_large)
+ break;
+ if (gimple_assign_single_p (stmt))
+ /* No need to lower copies, loads or stores. */
+ continue;
+ if (gimple_assign_cast_p (stmt))
+ {
+ tree lhs = gimple_assign_lhs (stmt);
+ tree rhs = gimple_assign_rhs1 (stmt);
+ if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
+ && (TYPE_PRECISION (TREE_TYPE (lhs))
+ == TYPE_PRECISION (TREE_TYPE (rhs))))
+ /* No need to lower casts to same precision. */
+ continue;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (kind == bitint_prec_middle)
+ {
+ tree type = NULL_TREE;
+ /* Middle _BitInt(N) is rewritten to casts to INTEGER_TYPEs
+ with the same precision and back. */
+ if (tree lhs = gimple_get_lhs (stmt))
+ if (TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
+ && (bitint_precision_kind (TREE_TYPE (lhs))
+ == bitint_prec_middle))
+ {
+ int prec = TYPE_PRECISION (TREE_TYPE (lhs));
+ int uns = TYPE_UNSIGNED (TREE_TYPE (lhs));
+ type = build_nonstandard_integer_type (prec, uns);
+ tree lhs2 = make_ssa_name (type);
+ gimple *g = gimple_build_assign (lhs, NOP_EXPR, lhs2);
+ gsi_insert_after (&gsi, g, GSI_SAME_STMT);
+ gimple_set_lhs (stmt, lhs2);
+ }
+ unsigned int nops = gimple_num_ops (stmt);
+ for (unsigned int i = 0; i < nops; ++i)
+ if (tree op = gimple_op (stmt, i))
+ {
+ tree nop = maybe_cast_middle_bitint (&gsi, op, type);
+ if (nop != op)
+ gimple_set_op (stmt, i, nop);
+ else if (COMPARISON_CLASS_P (op))
+ {
+ TREE_OPERAND (op, 0)
+ = maybe_cast_middle_bitint (&gsi,
+ TREE_OPERAND (op, 0),
+ type);
+ TREE_OPERAND (op, 1)
+ = maybe_cast_middle_bitint (&gsi,
+ TREE_OPERAND (op, 1),
+ type);
+ }
+ else if (TREE_CODE (op) == CASE_LABEL_EXPR)
+ {
+ CASE_LOW (op)
+ = maybe_cast_middle_bitint (&gsi, CASE_LOW (op),
+ type);
+ CASE_HIGH (op)
+ = maybe_cast_middle_bitint (&gsi, CASE_HIGH (op),
+ type);
+ }
+ }
+ update_stmt (stmt);
+ continue;
+ }
+
+ if (tree lhs = gimple_get_lhs (stmt))
+ if (TREE_CODE (lhs) == SSA_NAME)
+ {
+ tree type = TREE_TYPE (lhs);
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ type = TREE_TYPE (type);
+ if (TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) >= bitint_prec_large
+ && (large_huge.m_names == NULL
+ || !bitmap_bit_p (large_huge.m_names,
+ SSA_NAME_VERSION (lhs))))
+ continue;
+ }
+
+ large_huge.lower_stmt (stmt);
+ }
+
+ tree atype = NULL_TREE;
+ for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
+ gsi_next (&gsi))
+ {
+ gphi *phi = gsi.phi ();
+ tree lhs = gimple_phi_result (phi);
+ if (TREE_CODE (TREE_TYPE (lhs)) != BITINT_TYPE
+ || bitint_precision_kind (TREE_TYPE (lhs)) < bitint_prec_large)
+ continue;
+ int p1 = var_to_partition (large_huge.m_map, lhs);
+ gcc_assert (large_huge.m_vars[p1] != NULL_TREE);
+ tree v1 = large_huge.m_vars[p1];
+ for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
+ {
+ tree arg = gimple_phi_arg_def (phi, i);
+ edge e = gimple_phi_arg_edge (phi, i);
+ gimple *g;
+ switch (TREE_CODE (arg))
+ {
+ case INTEGER_CST:
+ if (integer_zerop (arg) && VAR_P (v1))
+ {
+ tree zero = build_zero_cst (TREE_TYPE (v1));
+ g = gimple_build_assign (v1, zero);
+ gsi_insert_on_edge (e, g);
+ edge_insertions = true;
+ break;
+ }
+ int ext;
+ unsigned int min_prec, prec, rem;
+ tree c;
+ prec = TYPE_PRECISION (TREE_TYPE (arg));
+ rem = prec % (2 * limb_prec);
+ min_prec = bitint_min_cst_precision (arg, ext);
+ if (min_prec > prec - rem - 2 * limb_prec
+ && min_prec > (unsigned) limb_prec)
+		  /* Constant which has enough significant bits that it
+		     isn't worth trying to save .rodata space by extending
+		     it from a smaller number.  */
+ min_prec = prec;
+ else
+ min_prec = CEIL (min_prec, limb_prec) * limb_prec;
+ if (min_prec == 0)
+ c = NULL_TREE;
+ else if (min_prec == prec)
+ c = tree_output_constant_def (arg);
+ else if (min_prec == (unsigned) limb_prec)
+ c = fold_convert (large_huge.m_limb_type, arg);
+ else
+ {
+ tree ctype = build_bitint_type (min_prec, 1);
+ c = tree_output_constant_def (fold_convert (ctype, arg));
+ }
+ if (c)
+ {
+ if (VAR_P (v1) && min_prec == prec)
+ {
+ tree v2 = build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (v1), c);
+ g = gimple_build_assign (v1, v2);
+ gsi_insert_on_edge (e, g);
+ edge_insertions = true;
+ break;
+ }
+ if (TREE_CODE (TREE_TYPE (c)) == INTEGER_TYPE)
+ g = gimple_build_assign (build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (c), v1),
+ c);
+ else
+ {
+ unsigned HOST_WIDE_INT nelts
+ = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (c)))
+ / limb_prec;
+ tree vtype
+ = build_array_type_nelts (large_huge.m_limb_type,
+ nelts);
+ g = gimple_build_assign (build1 (VIEW_CONVERT_EXPR,
+ vtype, v1),
+ build1 (VIEW_CONVERT_EXPR,
+ vtype, c));
+ }
+ gsi_insert_on_edge (e, g);
+ }
+ if (ext == 0)
+ {
+ unsigned HOST_WIDE_INT nelts
+ = (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (v1)))
+ - min_prec) / limb_prec;
+ tree vtype
+ = build_array_type_nelts (large_huge.m_limb_type,
+ nelts);
+ tree ptype = build_pointer_type (TREE_TYPE (v1));
+ tree off = fold_convert (ptype,
+ TYPE_SIZE_UNIT (TREE_TYPE (c)));
+ tree vd = build2 (MEM_REF, vtype,
+ build_fold_addr_expr (v1), off);
+ g = gimple_build_assign (vd, build_zero_cst (vtype));
+ }
+ else
+ {
+ tree vd = v1;
+ if (c)
+ {
+ tree ptype = build_pointer_type (TREE_TYPE (v1));
+ tree off
+ = fold_convert (ptype,
+ TYPE_SIZE_UNIT (TREE_TYPE (c)));
+ vd = build2 (MEM_REF, large_huge.m_limb_type,
+ build_fold_addr_expr (v1), off);
+ }
+ vd = build_fold_addr_expr (vd);
+ unsigned HOST_WIDE_INT nbytes
+ = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (v1)));
+ if (c)
+ nbytes
+ -= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (c)));
+ tree fn = builtin_decl_implicit (BUILT_IN_MEMSET);
+ g = gimple_build_call (fn, 3, vd,
+ integer_minus_one_node,
+ build_int_cst (sizetype,
+ nbytes));
+ }
+ gsi_insert_on_edge (e, g);
+ edge_insertions = true;
+ break;
+ default:
+ gcc_unreachable ();
+ case SSA_NAME:
+ if (gimple_code (SSA_NAME_DEF_STMT (arg)) == GIMPLE_NOP)
+ {
+ if (large_huge.m_names == NULL
+ || !bitmap_bit_p (large_huge.m_names,
+ SSA_NAME_VERSION (arg)))
+ continue;
+ }
+ int p2 = var_to_partition (large_huge.m_map, arg);
+ if (p1 == p2)
+ continue;
+ gcc_assert (large_huge.m_vars[p2] != NULL_TREE);
+ tree v2 = large_huge.m_vars[p2];
+ if (VAR_P (v1) && VAR_P (v2))
+ g = gimple_build_assign (v1, v2);
+ else if (VAR_P (v1))
+ g = gimple_build_assign (v1, build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (v1), v2));
+ else if (VAR_P (v2))
+ g = gimple_build_assign (build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (v2), v1), v2);
+ else
+ {
+ if (atype == NULL_TREE
+ || !tree_int_cst_equal (TYPE_SIZE (atype),
+ TYPE_SIZE (TREE_TYPE (lhs))))
+ {
+ unsigned HOST_WIDE_INT nelts
+ = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (lhs)))
+ / limb_prec;
+ atype
+ = build_array_type_nelts (large_huge.m_limb_type,
+ nelts);
+ }
+ g = gimple_build_assign (build1 (VIEW_CONVERT_EXPR,
+ atype, v1),
+ build1 (VIEW_CONVERT_EXPR,
+ atype, v2));
+ }
+ gsi_insert_on_edge (e, g);
+ edge_insertions = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (large_huge.m_names || has_large_huge)
+ {
+ gimple *nop = NULL;
+ for (i = 0; i < num_ssa_names; ++i)
+ {
+ tree s = ssa_name (i);
+ if (s == NULL_TREE)
+ continue;
+ tree type = TREE_TYPE (s);
+ if (TREE_CODE (type) == COMPLEX_TYPE)
+ type = TREE_TYPE (type);
+ if (TREE_CODE (type) == BITINT_TYPE
+ && bitint_precision_kind (type) >= bitint_prec_large)
+ {
+ if (large_huge.m_preserved
+ && bitmap_bit_p (large_huge.m_preserved,
+ SSA_NAME_VERSION (s)))
+ continue;
+ gimple *g = SSA_NAME_DEF_STMT (s);
+ if (gimple_code (g) == GIMPLE_NOP)
+ {
+ if (SSA_NAME_VAR (s))
+ set_ssa_default_def (cfun, SSA_NAME_VAR (s), NULL_TREE);
+ release_ssa_name (s);
+ continue;
+ }
+ if (gimple_code (g) != GIMPLE_ASM)
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (g);
+ bool save_vta = flag_var_tracking_assignments;
+ flag_var_tracking_assignments = false;
+ gsi_remove (&gsi, true);
+ flag_var_tracking_assignments = save_vta;
+ }
+ if (nop == NULL)
+ nop = gimple_build_nop ();
+ SSA_NAME_DEF_STMT (s) = nop;
+ release_ssa_name (s);
+ }
+ }
+ if (optimize)
+ disable_ranger (cfun);
+ }
+
+ if (edge_insertions)
+ gsi_commit_edge_inserts ();
+
+ return ret;
+}
+
+namespace {
+
+const pass_data pass_data_lower_bitint =
+{
+ GIMPLE_PASS, /* type */
+ "bitintlower", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ PROP_ssa, /* properties_required */
+ PROP_gimple_lbitint, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_lower_bitint : public gimple_opt_pass
+{
+public:
+ pass_lower_bitint (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_lower_bitint, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ opt_pass * clone () final override { return new pass_lower_bitint (m_ctxt); }
+ unsigned int execute (function *) final override
+ {
+ return gimple_lower_bitint ();
+ }
+
+}; // class pass_lower_bitint
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_lower_bitint (gcc::context *ctxt)
+{
+ return new pass_lower_bitint (ctxt);
+}
+
+
+namespace {
+
+const pass_data pass_data_lower_bitint_O0 =
+{
+ GIMPLE_PASS, /* type */
+ "bitintlower0", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ PROP_cfg, /* properties_required */
+ PROP_gimple_lbitint, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_lower_bitint_O0 : public gimple_opt_pass
+{
+public:
+ pass_lower_bitint_O0 (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_lower_bitint_O0, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ bool gate (function *fun) final override
+ {
+ /* With errors, normal optimization passes are not run. If we don't
+ lower bitint operations at all, rtl expansion will abort. */
+ return !(fun->curr_properties & PROP_gimple_lbitint);
+ }
+
+ unsigned int execute (function *) final override
+ {
+ return gimple_lower_bitint ();
+ }
+
+}; // class pass_lower_bitint_O0
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_lower_bitint_O0 (gcc::context *ctxt)
+{
+ return new pass_lower_bitint_O0 (ctxt);
+}
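
(Illustration, not part of the patch.)  The statement walk above rewrites
middle-precision _BitInt operations as casts to a same-precision
INTEGER_TYPE and back, while large and huge precisions take the limb-wise
paths.  A minimal C sketch of the three shapes, assuming a 64-bit limb and
128-bit integer arithmetic; the actual precision boundaries are
target-dependent:

   /* Illustrative only: which precisions count as middle, large or huge
      depends on the target's limb width and widest integer mode.  */
   _BitInt(100)  add_middle (_BitInt(100) a, _BitInt(100) b) { return a + b; }
   _BitInt(256)  add_large  (_BitInt(256) a, _BitInt(256) b) { return a + b; }
   _BitInt(4096) add_huge   (_BitInt(4096) a, _BitInt(4096) b) { return a + b; }
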
diff --git a/gcc/gimple-lower-bitint.h b/gcc/gimple-lower-bitint.h
new file mode 100644
index 0000000..60b1598
--- /dev/null
+++ b/gcc/gimple-lower-bitint.h
@@ -0,0 +1,31 @@
+/* Header file for gimple-lower-bitint.cc exports.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_LOWER_BITINT_H
+#define GCC_GIMPLE_LOWER_BITINT_H
+
+class live_track;
+struct ssa_conflicts;
+extern void build_bitint_stmt_ssa_conflicts (gimple *, live_track *,
+ ssa_conflicts *, bitmap,
+ void (*) (live_track *, tree,
+ ssa_conflicts *),
+ void (*) (live_track *, tree));
+
+#endif /* GCC_GIMPLE_LOWER_BITINT_H */
diff --git a/gcc/passes.def b/gcc/passes.def
index ef5a21a..4110a47 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -237,6 +237,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_tail_recursion);
NEXT_PASS (pass_ch);
NEXT_PASS (pass_lower_complex);
+ NEXT_PASS (pass_lower_bitint);
NEXT_PASS (pass_sra);
/* The dom pass will also resolve all __builtin_constant_p calls
that are still there to 0. This has to be done after some
@@ -386,6 +387,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_strip_predict_hints, false /* early_p */);
/* Lower remaining pieces of GIMPLE. */
NEXT_PASS (pass_lower_complex);
+ NEXT_PASS (pass_lower_bitint);
NEXT_PASS (pass_lower_vector_ssa);
NEXT_PASS (pass_lower_switch);
/* Perform simple scalar cleanup which is constant/copy propagation. */
@@ -429,6 +431,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_lower_vaarg);
NEXT_PASS (pass_lower_vector);
NEXT_PASS (pass_lower_complex_O0);
+ NEXT_PASS (pass_lower_bitint_O0);
NEXT_PASS (pass_sancov_O0);
NEXT_PASS (pass_lower_switch_O0);
NEXT_PASS (pass_asan_O0);
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 57865cd..eba2d54 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -229,6 +229,7 @@ protected:
have completed. */
#define PROP_assumptions_done (1 << 19) /* Assume function kept
around. */
+#define PROP_gimple_lbitint (1 << 20) /* lowered large _BitInt */
#define PROP_gimple \
(PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh | PROP_gimple_lomp)
@@ -420,6 +421,8 @@ extern gimple_opt_pass *make_pass_strip_predict_hints (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_rebuild_frequencies (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_lower_complex_O0 (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_lower_complex (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_bitint_O0 (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_bitint (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_lower_switch (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_lower_switch_O0 (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_lower_vector (gcc::context *ctxt);
diff --git a/gcc/tree-ssa-coalesce.cc b/gcc/tree-ssa-coalesce.cc
index e9de9d4d..482764e 100644
--- a/gcc/tree-ssa-coalesce.cc
+++ b/gcc/tree-ssa-coalesce.cc
@@ -38,6 +38,7 @@ along with GCC; see the file COPYING3. If not see
#include "explow.h"
#include "tree-dfa.h"
#include "stor-layout.h"
+#include "gimple-lower-bitint.h"
/* This set of routines implements a coalesce_list. This is an object which
is used to track pairs of ssa_names which are desirable to coalesce
@@ -914,6 +915,14 @@ build_ssa_conflict_graph (tree_live_info_p liveinfo)
else if (is_gimple_debug (stmt))
continue;
+ if (map->bitint)
+ {
+ build_bitint_stmt_ssa_conflicts (stmt, live, graph, map->bitint,
+ live_track_process_def,
+ live_track_process_use);
+ continue;
+ }
+
/* For stmts with more than one SSA_NAME definition pretend all the
SSA_NAME outputs but the first one are live at this point, so
that conflicts are added in between all those even when they are
@@ -1058,6 +1067,8 @@ create_coalesce_list_for_region (var_map map, bitmap used_in_copy)
if (virtual_operand_p (res))
continue;
ver = SSA_NAME_VERSION (res);
+ if (map->bitint && !bitmap_bit_p (map->bitint, ver))
+ continue;
/* Register ssa_names and coalesces between the args and the result
of all PHI. */
@@ -1106,6 +1117,8 @@ create_coalesce_list_for_region (var_map map, bitmap used_in_copy)
{
v1 = SSA_NAME_VERSION (lhs);
v2 = SSA_NAME_VERSION (rhs1);
+ if (map->bitint && !bitmap_bit_p (map->bitint, v1))
+ break;
cost = coalesce_cost_bb (bb);
add_coalesce (cl, v1, v2, cost);
bitmap_set_bit (used_in_copy, v1);
@@ -1124,12 +1137,16 @@ create_coalesce_list_for_region (var_map map, bitmap used_in_copy)
if (!rhs1)
break;
tree lhs = ssa_default_def (cfun, res);
+ if (map->bitint && !lhs)
+ break;
gcc_assert (lhs);
if (TREE_CODE (rhs1) == SSA_NAME
&& gimple_can_coalesce_p (lhs, rhs1))
{
v1 = SSA_NAME_VERSION (lhs);
v2 = SSA_NAME_VERSION (rhs1);
+ if (map->bitint && !bitmap_bit_p (map->bitint, v1))
+ break;
cost = coalesce_cost_bb (bb);
add_coalesce (cl, v1, v2, cost);
bitmap_set_bit (used_in_copy, v1);
@@ -1177,6 +1194,8 @@ create_coalesce_list_for_region (var_map map, bitmap used_in_copy)
v1 = SSA_NAME_VERSION (outputs[match]);
v2 = SSA_NAME_VERSION (input);
+ if (map->bitint && !bitmap_bit_p (map->bitint, v1))
+ continue;
if (gimple_can_coalesce_p (outputs[match], input))
{
@@ -1651,6 +1670,33 @@ compute_optimized_partition_bases (var_map map, bitmap used_in_copies,
}
}
+ if (map->bitint
+ && flag_tree_coalesce_vars
+ && (optimize > 1 || parts < 500))
+ for (i = 0; i < (unsigned) parts; ++i)
+ {
+ tree s1 = partition_to_var (map, i);
+ int p1 = partition_find (tentative, i);
+ for (unsigned j = i + 1; j < (unsigned) parts; ++j)
+ {
+ tree s2 = partition_to_var (map, j);
+ if (s1 == s2)
+ continue;
+ if (tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (s1)),
+ TYPE_SIZE (TREE_TYPE (s2))))
+ {
+ int p2 = partition_find (tentative, j);
+
+ if (p1 == p2)
+ continue;
+
+ partition_union (tentative, p1, p2);
+ if (partition_find (tentative, i) != p1)
+ break;
+ }
+ }
+ }
+
map->partition_to_base_index = XCNEWVEC (int, parts);
auto_vec<unsigned int> index_map (parts);
if (parts)
@@ -1692,6 +1738,101 @@ compute_optimized_partition_bases (var_map map, bitmap used_in_copies,
partition_delete (tentative);
}
+/* For the bitint lowering pass, try harder.  Partitions which contain
+   the SSA_NAME default def of a PARM_DECL, or an SSA_NAME whose underlying
+   variable is a RESULT_DECL, need to have compatible types because they
+   will use that PARM_DECL or RESULT_DECL directly.  Other partitions can
+   even have mutually incompatible _BitInt types, as long as the types
+   have the same size - those will use VAR_DECLs which are just arrays
+   of the limbs.  */
+
+static void
+coalesce_bitint (var_map map, ssa_conflicts *graph)
+{
+ unsigned n = num_var_partitions (map);
+ if (optimize <= 1 && n > 500)
+ return;
+
+ bool try_same_size = false;
+ FILE *debug_file = (dump_flags & TDF_DETAILS) ? dump_file : NULL;
+ for (unsigned i = 0; i < n; ++i)
+ {
+ tree s1 = partition_to_var (map, i);
+ if ((unsigned) var_to_partition (map, s1) != i)
+ continue;
+ int v1 = SSA_NAME_VERSION (s1);
+ for (unsigned j = i + 1; j < n; ++j)
+ {
+ tree s2 = partition_to_var (map, j);
+ if (s1 == s2 || (unsigned) var_to_partition (map, s2) != j)
+ continue;
+ if (!types_compatible_p (TREE_TYPE (s1), TREE_TYPE (s2)))
+ {
+ if (!try_same_size
+ && tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (s1)),
+ TYPE_SIZE (TREE_TYPE (s2))))
+ try_same_size = true;
+ continue;
+ }
+ int v2 = SSA_NAME_VERSION (s2);
+ if (attempt_coalesce (map, graph, v1, v2, debug_file)
+ && partition_to_var (map, i) != s1)
+ break;
+ }
+ }
+
+ if (!try_same_size)
+ return;
+
+ unsigned i;
+ bitmap_iterator bi;
+ bitmap same_type = NULL;
+
+ EXECUTE_IF_SET_IN_BITMAP (map->bitint, 0, i, bi)
+ {
+ tree s = ssa_name (i);
+ if (!SSA_NAME_VAR (s))
+ continue;
+ if (TREE_CODE (SSA_NAME_VAR (s)) != RESULT_DECL
+ && (TREE_CODE (SSA_NAME_VAR (s)) != PARM_DECL
+ || !SSA_NAME_IS_DEFAULT_DEF (s)))
+ continue;
+ if (same_type == NULL)
+ same_type = BITMAP_ALLOC (NULL);
+ int p = var_to_partition (map, s);
+ bitmap_set_bit (same_type, p);
+ }
+
+ for (i = 0; i < n; ++i)
+ {
+ if (same_type && bitmap_bit_p (same_type, i))
+ continue;
+ tree s1 = partition_to_var (map, i);
+ if ((unsigned) var_to_partition (map, s1) != i)
+ continue;
+ int v1 = SSA_NAME_VERSION (s1);
+ for (unsigned j = i + 1; j < n; ++j)
+ {
+ if (same_type && bitmap_bit_p (same_type, j))
+ continue;
+
+ tree s2 = partition_to_var (map, j);
+ if (s1 == s2 || (unsigned) var_to_partition (map, s2) != j)
+ continue;
+
+ if (!tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (s1)),
+ TYPE_SIZE (TREE_TYPE (s2))))
+ continue;
+
+ int v2 = SSA_NAME_VERSION (s2);
+ if (attempt_coalesce (map, graph, v1, v2, debug_file)
+ && partition_to_var (map, i) != s1)
+ break;
+ }
+ }
+
+ BITMAP_FREE (same_type);
+}
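
To make the same-size rule above concrete, a hypothetical C function
(assuming an ABI that rounds _BitInt storage up to whole 64-bit limbs, so
both locals need four limbs): x and y have incompatible types but equal
TYPE_SIZE and non-overlapping lifetimes, so the second loop in
coalesce_bitint may put them into one limb-array VAR_DECL - whether it
actually does still depends on the conflict graph:

   unsigned long long
   use_two (_BitInt(250) a, unsigned _BitInt(256) b)
   {
     _BitInt(250) x = a + a;               /* four 64-bit limbs assumed */
     unsigned long long r = (unsigned long long) x;
     unsigned _BitInt(256) y = b * b;      /* four limbs, live after x dies */
     return r ^ (unsigned long long) y;
   }
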
+
/* Given an initial var_map MAP, coalesce variables and return a partition map
with the resulting coalesce. Note that this function is called in either
live range computation context or out-of-ssa context, indicated by MAP. */
@@ -1709,6 +1850,8 @@ coalesce_ssa_name (var_map map)
if (map->outofssa_p)
populate_coalesce_list_for_outofssa (cl, used_in_copies);
bitmap_list_view (used_in_copies);
+ if (map->bitint)
+ bitmap_ior_into (used_in_copies, map->bitint);
if (dump_file && (dump_flags & TDF_DETAILS))
dump_var_map (dump_file, map);
@@ -1756,6 +1899,9 @@ coalesce_ssa_name (var_map map)
((dump_flags & TDF_DETAILS) ? dump_file : NULL));
delete_coalesce_list (cl);
+
+ if (map->bitint && flag_tree_coalesce_vars)
+ coalesce_bitint (map, graph);
+
ssa_conflicts_delete (graph);
}
-
diff --git a/gcc/tree-ssa-live.cc b/gcc/tree-ssa-live.cc
index 61789fa..8d8a318 100644
--- a/gcc/tree-ssa-live.cc
+++ b/gcc/tree-ssa-live.cc
@@ -77,10 +77,11 @@ var_map_base_fini (var_map map)
}
/* Create a variable partition map of SIZE for region, initialize and return
it. Region is a loop if LOOP is non-NULL, otherwise is the current
- function. */
+ function. If BITINT is non-NULL, only SSA_NAMEs from that bitmap
+ will be coalesced. */
var_map
-init_var_map (int size, class loop *loop)
+init_var_map (int size, class loop *loop, bitmap bitint)
{
var_map map;
@@ -109,7 +110,8 @@ init_var_map (int size, class loop *loop)
else
{
map->bmp_bbs = NULL;
- map->outofssa_p = true;
+ map->outofssa_p = bitint == NULL;
+ map->bitint = bitint;
basic_block bb;
FOR_EACH_BB_FN (bb, cfun)
map->vec_bbs.safe_push (bb);
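
The extra BITINT argument is what lets the lowering pass reuse this
machinery for just the large/huge _BitInt names.  A hedged sketch of the
call shape it enables (the bitmap name below is hypothetical, not the
pass's exact code):

   bitmap bitint_names = BITMAP_ALLOC (NULL);
   /* ... bitmap_set_bit (bitint_names, SSA_NAME_VERSION (name)) for each
      large/huge _BitInt SSA_NAME that should be coalesced ...  */
   var_map map = init_var_map (num_ssa_names, NULL, bitint_names);
   /* outofssa_p stays false and map->bitint is set, so only the names in
      the bitmap take part in coalescing.  */
   coalesce_ssa_name (map);
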
diff --git a/gcc/tree-ssa-live.h b/gcc/tree-ssa-live.h
index d175ad7..73191dc 100644
--- a/gcc/tree-ssa-live.h
+++ b/gcc/tree-ssa-live.h
@@ -70,6 +70,10 @@ typedef struct _var_map
/* Vector of basic block in the region. */
vec<basic_block> vec_bbs;
+ /* If non-NULL, only coalesce SSA_NAMEs from this bitmap, and try harder
+ for those (for bitint lowering pass). */
+ bitmap bitint;
+
/* True if this map is for out-of-ssa, otherwise for live range
computation. When for out-of-ssa, it also means the var map is computed
for whole current function. */
@@ -80,7 +84,7 @@ typedef struct _var_map
/* Value used to represent no partition number. */
#define NO_PARTITION -1
-extern var_map init_var_map (int, class loop* = NULL);
+extern var_map init_var_map (int, class loop * = NULL, bitmap = NULL);
extern void delete_var_map (var_map);
extern int var_union (var_map, tree, tree);
extern void partition_view_normal (var_map);
@@ -100,7 +104,7 @@ inline bool
region_contains_p (var_map map, basic_block bb)
{
/* It's possible that the function is called with ENTRY_BLOCK/EXIT_BLOCK. */
- if (map->outofssa_p)
+ if (map->outofssa_p || map->bitint)
return (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK);
return bitmap_bit_p (map->bmp_bbs, bb->index);